//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to
/// decompose GEPs is reached. It will affect the precision of basic
/// alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (ImmutableCallSite(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the second option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}
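
// Illustrative example (not from the original source): for
//   %a = shl i32 %x, 2
//   %b = add i32 %a, 12
// GetLinearExpression(%b, ...) returns %x with Scale = 4 and Offset = 12;
// NSW and NUW are conservatively cleared by the shl case above, whose wrap
// flags don't carry over to the equivalent multiplication.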

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64, by sign-extending from bit
/// PointerSize - 1. This is an issue in particular for 32b programs with
/// negative indices that rely on two's complement wrap-arounds for precise
/// alias information.
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed with
        // the attributes, but have properties like returning aliasing pointer.
        // Because some analyses may assume that a nocapture pointer is not
        // returned from some special intrinsic (because the function would have
        // to be marked with the returns attribute), it is crucial to use this
        // function, which should be kept in sync with CaptureTracking. Not
        // using it may cause weird miscompilations where 2 aliasing pointers
        // are assumed to noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
          DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
          DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }

      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // All GEP math happens in the width of the pointer type, so we can
      // truncate the value to 64 bits as we don't currently handle pointers
      // larger than 64 bits and we would crash later.
      // TODO: Make `Scale` an APInt to avoid this problem.
      if (IndexScale.getBitWidth() > 64)
        IndexScale = IndexScale.sextOrTrunc(64);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}
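
// For example (illustrative): with OrLocal = true, a pointer that is a select
// between two allocas is reported as constant-or-local, since every worklist
// leaf is an alloca; a non-constant global instead falls back to the
// conservative AAResultBase answer.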

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (CS.onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e. Mod only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy
  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}
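
// For example (illustrative): for a recognized call to
// memset_pattern16(%dst, %pattern, 16), ArgIdx 0 is classified as Mod through
// isWriteOnlyParam above, while any argument carrying the readonly attribute
// comes back as Ref and readnone as NoModRef.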

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add it back below if at least one
    // operand aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts; let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  //
  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts; let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}
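
// To make the asymmetry above concrete (illustrative): against a call that
// writes memory, getModRefInfo(guard, call) is Ref because the guard reads
// state the call may write, while getModRefInfo(call, guard) is Mod because
// the call may write state the guard will read.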

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other indices
  // might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
    GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.
    //
    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.
  //
  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}
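
// Illustrative example (not from the original source): with
//   %S = type { i32, i32 }
//   %a = getelementptr inbounds %S, %S* %p, i64 %i, i32 0
//   %b = getelementptr inbounds %S, %S* %p, i64 %j, i32 1
// two 4-byte accesses through %a and %b get NoAlias: whatever %i and %j are,
// the two pointers land in non-overlapping fields of elements of the same
// array of %S, and the EltsDontOverlap lambda rules out wrap-around overlap.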

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP can not alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo *random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random can not be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset from the
  // base and can use it. If the GEP has variable indices, we can't compute
  // an exact GEP offset to identify an aliasing pointer, so return false in
  // that case.
  if (!DecompGEP.VarIndices.empty())
    return false;
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult
BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
                        const AAMDNodes &V1AAInfo, const Value *V2,
                        LocationSize V2Size, const AAMDNodes &V2AAInfo,
                        const Value *UnderlyingV1, const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
    DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
    DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
                   UnderlyingV2, LocationSize::unknown(), AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
      return MayAlias;

    AliasResult R =
        aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(), V2,
                   LocationSize::unknown(), V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias GEP base pointer, conservatively returns MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done through
      // a pointer value associated with an address range of the memory access,
      // otherwise the behavior is undefined.".
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != LocationSize::unknown()) {
        if ((uint64_t)GEP1BaseOffset < V2Size.getValue())
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...).
      if (V1Size != LocationSize::unknown() &&
          V2Size != LocationSize::unknown()) {
        if (-(uint64_t)GEP1BaseOffset < V1Size.getValue())
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != LocationSize::unknown() &&
        V2Size != LocationSize::unknown() && ModOffset >= V2Size.getValue() &&
        V1Size.getValue() <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 &&
        V2Size != LocationSize::unknown() &&
        V2Size.getValue() <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
                                       LocationSize SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, LocationSize V2Size,
                                       const AAMDNodes &V2AAInfo,
                                       const Value *UnderV2) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                 SISize, SIAAInfo, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
                 UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}
1489 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1491 AliasResult
BasicAAResult::aliasPHI(const PHINode
*PN
, LocationSize PNSize
,
1492 const AAMDNodes
&PNAAInfo
, const Value
*V2
,
1493 LocationSize V2Size
,
1494 const AAMDNodes
&V2AAInfo
,
1495 const Value
*UnderV2
) {
1496 // Track phi nodes we have visited. We use this information when we determine
1497 // value equivalence.
1498 VisitedPhiBBs
.insert(PN
->getParent());
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }
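
  // Illustrative (hypothetical IR): for two phis in the same block,
  //   %p = phi i8* [ %p0, %entry ], [ %p.next, %latch ]
  //   %q = phi i8* [ %q0, %entry ], [ %q.next, %latch ]
  // the loop above speculates noalias(%p, %q) in the cache and then checks
  // the edge pairs (%p0, %q0) and (%p.next, %q.next), restoring the original
  // cache entry if the speculation does not hold.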
  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes. In which case, this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return MayAlias;
    // Add the values to V1Srcs
    for (Value *PV1 : PhiValueSet) {
      if (EnableRecPhiAnalysis) {
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }
      }
      V1Srcs.push_back(PV1);
    }
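
    // Illustrative (hypothetical IR): the recursive case skipped above looks
    // like
    //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
    //   %p.next = getelementptr i8, i8* %p, i64 4
    // Following %p.next would lead straight back to this phi, so it is left
    // out of V1Srcs and isRecursive widens PNSize below instead.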
  } else {
    // If we don't have PhiInfo then just look at the operands of the phi itself
    // FIXME: Remove this once we can guarantee that we have PhiInfo always
    SmallPtrSet<Value *, 4> UniqueSrc;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1))
        // If any source is itself a PHI, return MayAlias conservatively
        // to avoid compile time explosion. The worst possible case is if both
        // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
        // and 'n' are the number of PHI sources.
        return MayAlias;

      if (EnableRecPhiAnalysis)
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }
  }
  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return MayAlias;

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = LocationSize::unknown();

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
                 PNSize, PNAAInfo, UnderV2);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}
/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      LocationSize V2Size, AAMDNodes V2AAInfo,
                                      const Value *O1, const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other
  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }
  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((V1Size.isPrecise() && isObjectSmallerThan(O2, V1Size.getValue(), DL, TLI,
                                                 NullIsValidLocation)) ||
      (V2Size.isPrecise() && isObjectSmallerThan(O1, V2Size.getValue(), DL, TLI,
                                                 NullIsValidLocation)))
    return NoAlias;
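
  // Illustrative: if V1 is an 8-byte access and O2 is a 4-byte alloca, V1
  // cannot legally address O2 at all (the access would overrun the object,
  // which is undefined behavior), so NoAlias is sound here.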
  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;
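
  // Note (added for clarity): ordering the pair by pointer value canonicalizes
  // the cache key, so the queries (V1, V2) and (V2, V1) share one entry, and
  // the MayAlias sentinel inserted above terminates any recursive re-query of
  // the same pair through aliasGEP/aliasPHI/aliasSelect.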
  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
  // GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }
  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }
  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }
  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasCache[Locs] = PartialAlias;

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
  return AliasCache[Locs] = Result;
}
/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
      return false;

  return true;
}
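
// Illustrative (hypothetical IR): inside a loop,
//   %p = phi i8* [ %a, %entry ], [ %p.next, %loop ]
//   %p.next = getelementptr i8, i8* %p, i64 1
// the textually identical value %p.next can denote a different address on
// each iteration, so the reachability check above refuses to treat two
// occurrences of it as equal once a phi block has been visited.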
/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}
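
// Worked example (illustrative): with Dest = { %i * 4 } and Src = { %i * 4 },
// the matching entry cancels and Dest ends up empty (the GEPs' variable parts
// are identical). With Src = { %i * 1 } instead, Dest becomes { %i * 3 },
// i.e. the symbolic difference Dest - Src.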
bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, int64_t BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;
  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;
  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
}
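
// Worked example (illustrative): if the two GEPs are indexed by %x + 1 and %x
// with Var0.Scale == 4 and Var1.Scale == -4, GetLinearExpression strips both
// down to %x with offsets 1 and 0, so MinDiff == 1 and MinDiffBytes == 4.
// With BaseOffset == 0, two accesses of up to 4 bytes each are then NoAlias.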
//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F),
                       AM.getCachedResult<PhiValuesAnalysis>(F));
}
BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", false, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}
bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr,
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}
void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}
BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}