1 //===- BasicAliasAnalysis.cpp - Local Alias Analysis Impl -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the default implementation of the Alias Analysis interface
11 // that simply implements a few identities (two different globals cannot alias,
12 // etc), but otherwise does no analysis.
14 //===----------------------------------------------------------------------===//
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/ManagedStatic.h"
#include <algorithm>
#include <cstring>
using namespace llvm;
33 /// NoAA - This class implements the -no-aa pass, which always returns "I
34 /// don't know" for alias queries. NoAA is unlike other alias analysis
35 /// implementations, in that it does not chain to a previous analysis. As
36 /// such it doesn't follow many of the rules that other alias analyses must.
38 struct VISIBILITY_HIDDEN NoAA
: public ImmutablePass
, public AliasAnalysis
{
39 static char ID
; // Class identification, replacement for typeinfo
40 NoAA() : ImmutablePass((intptr_t)&ID
) {}
41 NoAA(intptr_t PID
) : ImmutablePass(PID
) { }
43 virtual void getAnalysisUsage(AnalysisUsage
&AU
) const {
44 AU
.addRequired
<TargetData
>();
47 virtual void initializePass() {
48 TD
= &getAnalysis
<TargetData
>();
51 virtual AliasResult
alias(const Value
*V1
, unsigned V1Size
,
52 const Value
*V2
, unsigned V2Size
) {
56 virtual ModRefBehavior
getModRefBehavior(Function
*F
, CallSite CS
,
57 std::vector
<PointerAccessInfo
> *Info
) {
58 return UnknownModRefBehavior
;
61 virtual void getArgumentAccesses(Function
*F
, CallSite CS
,
62 std::vector
<PointerAccessInfo
> &Info
) {
63 assert(0 && "This method may not be called on this function!");
66 virtual void getMustAliases(Value
*P
, std::vector
<Value
*> &RetVals
) { }
67 virtual bool pointsToConstantMemory(const Value
*P
) { return false; }
68 virtual ModRefResult
getModRefInfo(CallSite CS
, Value
*P
, unsigned Size
) {
71 virtual ModRefResult
getModRefInfo(CallSite CS1
, CallSite CS2
) {
74 virtual bool hasNoModRefInfoForCalls() const { return true; }
76 virtual void deleteValue(Value
*V
) {}
77 virtual void copyValue(Value
*From
, Value
*To
) {}
80 // Register this pass...
83 U("no-aa", "No Alias Analysis (always returns 'may' alias)");
85 // Declare that we implement the AliasAnalysis interface
86 RegisterAnalysisGroup
<AliasAnalysis
> V(U
);
87 } // End of anonymous namespace
89 ImmutablePass
*llvm::createNoAAPass() { return new NoAA(); }
92 /// BasicAliasAnalysis - This is the default alias analysis implementation.
93 /// Because it doesn't chain to a previous alias analysis (like -no-aa), it
94 /// derives from the NoAA class.
95 struct VISIBILITY_HIDDEN BasicAliasAnalysis
: public NoAA
{
96 static char ID
; // Class identification, replacement for typeinfo
97 BasicAliasAnalysis() : NoAA((intptr_t)&ID
) { }
98 AliasResult
alias(const Value
*V1
, unsigned V1Size
,
99 const Value
*V2
, unsigned V2Size
);
101 ModRefResult
getModRefInfo(CallSite CS
, Value
*P
, unsigned Size
);
102 ModRefResult
getModRefInfo(CallSite CS1
, CallSite CS2
) {
103 return NoAA::getModRefInfo(CS1
,CS2
);
106 /// hasNoModRefInfoForCalls - We can provide mod/ref information against
107 /// non-escaping allocations.
108 virtual bool hasNoModRefInfoForCalls() const { return false; }
110 /// pointsToConstantMemory - Chase pointers until we find a (constant
112 bool pointsToConstantMemory(const Value
*P
);
114 virtual ModRefBehavior
getModRefBehavior(Function
*F
, CallSite CS
,
115 std::vector
<PointerAccessInfo
> *Info
);
118 // CheckGEPInstructions - Check two GEP instructions with known
119 // must-aliasing base pointers. This checks to see if the index expressions
120 // preclude the pointers from aliasing...
122 CheckGEPInstructions(const Type
* BasePtr1Ty
,
123 Value
**GEP1Ops
, unsigned NumGEP1Ops
, unsigned G1Size
,
124 const Type
*BasePtr2Ty
,
125 Value
**GEP2Ops
, unsigned NumGEP2Ops
, unsigned G2Size
);
128 // Register this pass...
129 char BasicAliasAnalysis::ID
= 0;
130 RegisterPass
<BasicAliasAnalysis
>
131 X("basicaa", "Basic Alias Analysis (default AA impl)");
133 // Declare that we implement the AliasAnalysis interface
134 RegisterAnalysisGroup
<AliasAnalysis
, true> Y(X
);
135 } // End of anonymous namespace
137 ImmutablePass
*llvm::createBasicAliasAnalysisPass() {
138 return new BasicAliasAnalysis();
141 // getUnderlyingObject - This traverses the use chain to figure out what object
142 // the specified value points to. If the value points to, or is derived from, a
143 // unique object or an argument, return it.
144 static const Value
*getUnderlyingObject(const Value
*V
) {
145 if (!isa
<PointerType
>(V
->getType())) return 0;
147 // If we are at some type of object, return it. GlobalValues and Allocations
148 // have unique addresses.
149 if (isa
<GlobalValue
>(V
) || isa
<AllocationInst
>(V
) || isa
<Argument
>(V
))
152 // Traverse through different addressing mechanisms...
153 if (const Instruction
*I
= dyn_cast
<Instruction
>(V
)) {
154 if (isa
<BitCastInst
>(I
) || isa
<GetElementPtrInst
>(I
))
155 return getUnderlyingObject(I
->getOperand(0));
156 } else if (const ConstantExpr
*CE
= dyn_cast
<ConstantExpr
>(V
)) {
157 if (CE
->getOpcode() == Instruction::BitCast
||
158 CE
->getOpcode() == Instruction::GetElementPtr
)
159 return getUnderlyingObject(CE
->getOperand(0));
164 static const User
*isGEP(const Value
*V
) {
165 if (isa
<GetElementPtrInst
>(V
) ||
166 (isa
<ConstantExpr
>(V
) &&
167 cast
<ConstantExpr
>(V
)->getOpcode() == Instruction::GetElementPtr
))
168 return cast
<User
>(V
);
172 static const Value
*GetGEPOperands(const Value
*V
,
173 SmallVector
<Value
*, 16> &GEPOps
){
174 assert(GEPOps
.empty() && "Expect empty list to populate!");
175 GEPOps
.insert(GEPOps
.end(), cast
<User
>(V
)->op_begin()+1,
176 cast
<User
>(V
)->op_end());
178 // Accumulate all of the chained indexes into the operand array
179 V
= cast
<User
>(V
)->getOperand(0);
181 while (const User
*G
= isGEP(V
)) {
182 if (!isa
<Constant
>(GEPOps
[0]) || isa
<GlobalValue
>(GEPOps
[0]) ||
183 !cast
<Constant
>(GEPOps
[0])->isNullValue())
184 break; // Don't handle folding arbitrary pointer offsets yet...
185 GEPOps
.erase(GEPOps
.begin()); // Drop the zero index
186 GEPOps
.insert(GEPOps
.begin(), G
->op_begin()+1, G
->op_end());
187 V
= G
->getOperand(0);
192 /// pointsToConstantMemory - Chase pointers until we find a (constant
194 bool BasicAliasAnalysis::pointsToConstantMemory(const Value
*P
) {
195 if (const Value
*V
= getUnderlyingObject(P
))
196 if (const GlobalVariable
*GV
= dyn_cast
<GlobalVariable
>(V
))
197 return GV
->isConstant();
201 // Determine if an AllocationInst instruction escapes from the function it is
202 // contained in. If it does not escape, there is no way for another function to
203 // mod/ref it. We do this by looking at its uses and determining if the uses
204 // can escape (recursively).
205 static bool AddressMightEscape(const Value
*V
) {
206 for (Value::use_const_iterator UI
= V
->use_begin(), E
= V
->use_end();
208 const Instruction
*I
= cast
<Instruction
>(*UI
);
209 switch (I
->getOpcode()) {
210 case Instruction::Load
:
212 case Instruction::Store
:
213 if (I
->getOperand(0) == V
)
214 return true; // Escapes if the pointer is stored.
216 case Instruction::GetElementPtr
:
217 if (AddressMightEscape(I
))
219 case Instruction::BitCast
:
220 if (!isa
<PointerType
>(I
->getType()))
222 if (AddressMightEscape(I
))
225 case Instruction::Ret
:
226 // If returned, the address will escape to calling functions, but no
227 // callees could modify it.
236 // getModRefInfo - Check to see if the specified callsite can clobber the
237 // specified memory object. Since we only look at local properties of this
238 // function, we really can't say much about this query. We do, however, use
239 // simple "address taken" analysis on local objects.
241 AliasAnalysis::ModRefResult
242 BasicAliasAnalysis::getModRefInfo(CallSite CS
, Value
*P
, unsigned Size
) {
243 if (!isa
<Constant
>(P
))
244 if (const AllocationInst
*AI
=
245 dyn_cast_or_null
<AllocationInst
>(getUnderlyingObject(P
))) {
246 // Okay, the pointer is to a stack allocated object. If we can prove that
247 // the pointer never "escapes", then we know the call cannot clobber it,
248 // because it simply can't get its address.
249 if (!AddressMightEscape(AI
))
252 // If this is a tail call and P points to a stack location, we know that
253 // the tail call cannot access or modify the local stack.
254 if (CallInst
*CI
= dyn_cast
<CallInst
>(CS
.getInstruction()))
255 if (CI
->isTailCall() && isa
<AllocaInst
>(AI
))
259 // The AliasAnalysis base class has some smarts, lets use them.
260 return AliasAnalysis::getModRefInfo(CS
, P
, Size
);
263 // alias - Provide a bunch of ad-hoc rules to disambiguate in common cases, such
264 // as array references. Note that this function is heavily tail recursive.
265 // Hopefully we have a smart C++ compiler. :)
267 AliasAnalysis::AliasResult
268 BasicAliasAnalysis::alias(const Value
*V1
, unsigned V1Size
,
269 const Value
*V2
, unsigned V2Size
) {
270 // Strip off any constant expression casts if they exist
271 if (const ConstantExpr
*CE
= dyn_cast
<ConstantExpr
>(V1
))
272 if (CE
->isCast() && isa
<PointerType
>(CE
->getOperand(0)->getType()))
273 V1
= CE
->getOperand(0);
274 if (const ConstantExpr
*CE
= dyn_cast
<ConstantExpr
>(V2
))
275 if (CE
->isCast() && isa
<PointerType
>(CE
->getOperand(0)->getType()))
276 V2
= CE
->getOperand(0);
278 // Are we checking for alias of the same value?
279 if (V1
== V2
) return MustAlias
;
281 if ((!isa
<PointerType
>(V1
->getType()) || !isa
<PointerType
>(V2
->getType())) &&
282 V1
->getType() != Type::Int64Ty
&& V2
->getType() != Type::Int64Ty
)
283 return NoAlias
; // Scalars cannot alias each other
285 // Strip off cast instructions...
286 if (const BitCastInst
*I
= dyn_cast
<BitCastInst
>(V1
))
287 return alias(I
->getOperand(0), V1Size
, V2
, V2Size
);
288 if (const BitCastInst
*I
= dyn_cast
<BitCastInst
>(V2
))
289 return alias(V1
, V1Size
, I
->getOperand(0), V2Size
);
291 // Figure out what objects these things are pointing to if we can...
292 const Value
*O1
= getUnderlyingObject(V1
);
293 const Value
*O2
= getUnderlyingObject(V2
);
295 // Pointing at a discernible object?
298 if (isa
<Argument
>(O1
)) {
299 // Incoming argument cannot alias locally allocated object!
300 if (isa
<AllocationInst
>(O2
)) return NoAlias
;
301 // Otherwise, nothing is known...
302 } else if (isa
<Argument
>(O2
)) {
303 // Incoming argument cannot alias locally allocated object!
304 if (isa
<AllocationInst
>(O1
)) return NoAlias
;
305 // Otherwise, nothing is known...
306 } else if (O1
!= O2
) {
307 // If they are two different objects, we know that we have no alias...
311 // If they are the same object, they we can look at the indexes. If they
312 // index off of the object is the same for both pointers, they must alias.
313 // If they are provably different, they must not alias. Otherwise, we
314 // can't tell anything.
318 if (!isa
<Argument
>(O1
) && isa
<ConstantPointerNull
>(V2
))
319 return NoAlias
; // Unique values don't alias null
321 if (isa
<GlobalVariable
>(O1
) ||
322 (isa
<AllocationInst
>(O1
) &&
323 !cast
<AllocationInst
>(O1
)->isArrayAllocation()))
324 if (cast
<PointerType
>(O1
->getType())->getElementType()->isSized()) {
325 // If the size of the other access is larger than the total size of the
326 // global/alloca/malloc, it cannot be accessing the global (it's
327 // undefined to load or store bytes before or after an object).
328 const Type
*ElTy
= cast
<PointerType
>(O1
->getType())->getElementType();
329 unsigned GlobalSize
= getTargetData().getTypeSize(ElTy
);
330 if (GlobalSize
< V2Size
&& V2Size
!= ~0U)
336 if (!isa
<Argument
>(O2
) && isa
<ConstantPointerNull
>(V1
))
337 return NoAlias
; // Unique values don't alias null
339 if (isa
<GlobalVariable
>(O2
) ||
340 (isa
<AllocationInst
>(O2
) &&
341 !cast
<AllocationInst
>(O2
)->isArrayAllocation()))
342 if (cast
<PointerType
>(O2
->getType())->getElementType()->isSized()) {
343 // If the size of the other access is larger than the total size of the
344 // global/alloca/malloc, it cannot be accessing the object (it's
345 // undefined to load or store bytes before or after an object).
346 const Type
*ElTy
= cast
<PointerType
>(O2
->getType())->getElementType();
347 unsigned GlobalSize
= getTargetData().getTypeSize(ElTy
);
348 if (GlobalSize
< V1Size
&& V1Size
!= ~0U)
353 // If we have two gep instructions with must-alias'ing base pointers, figure
354 // out if the indexes to the GEP tell us anything about the derived pointer.
355 // Note that we also handle chains of getelementptr instructions as well as
356 // constant expression getelementptrs here.
358 if (isGEP(V1
) && isGEP(V2
)) {
359 // Drill down into the first non-gep value, to test for must-aliasing of
360 // the base pointers.
361 const Value
*BasePtr1
= V1
, *BasePtr2
= V2
;
363 BasePtr1
= cast
<User
>(BasePtr1
)->getOperand(0);
364 } while (isGEP(BasePtr1
) &&
365 cast
<User
>(BasePtr1
)->getOperand(1) ==
366 Constant::getNullValue(cast
<User
>(BasePtr1
)->getOperand(1)->getType()));
368 BasePtr2
= cast
<User
>(BasePtr2
)->getOperand(0);
369 } while (isGEP(BasePtr2
) &&
370 cast
<User
>(BasePtr2
)->getOperand(1) ==
371 Constant::getNullValue(cast
<User
>(BasePtr2
)->getOperand(1)->getType()));
373 // Do the base pointers alias?
374 AliasResult BaseAlias
= alias(BasePtr1
, ~0U, BasePtr2
, ~0U);
375 if (BaseAlias
== NoAlias
) return NoAlias
;
376 if (BaseAlias
== MustAlias
) {
377 // If the base pointers alias each other exactly, check to see if we can
378 // figure out anything about the resultant pointers, to try to prove
381 // Collect all of the chained GEP operands together into one simple place
382 SmallVector
<Value
*, 16> GEP1Ops
, GEP2Ops
;
383 BasePtr1
= GetGEPOperands(V1
, GEP1Ops
);
384 BasePtr2
= GetGEPOperands(V2
, GEP2Ops
);
386 // If GetGEPOperands were able to fold to the same must-aliased pointer,
387 // do the comparison.
388 if (BasePtr1
== BasePtr2
) {
390 CheckGEPInstructions(BasePtr1
->getType(),
391 &GEP1Ops
[0], GEP1Ops
.size(), V1Size
,
393 &GEP2Ops
[0], GEP2Ops
.size(), V2Size
);
394 if (GAlias
!= MayAlias
)
400 // Check to see if these two pointers are related by a getelementptr
401 // instruction. If one pointer is a GEP with a non-zero index of the other
402 // pointer, we know they cannot alias.
406 std::swap(V1Size
, V2Size
);
409 if (V1Size
!= ~0U && V2Size
!= ~0U)
411 SmallVector
<Value
*, 16> GEPOperands
;
412 const Value
*BasePtr
= GetGEPOperands(V1
, GEPOperands
);
414 AliasResult R
= alias(BasePtr
, V1Size
, V2
, V2Size
);
415 if (R
== MustAlias
) {
416 // If there is at least one non-zero constant index, we know they cannot
418 bool ConstantFound
= false;
419 bool AllZerosFound
= true;
420 for (unsigned i
= 0, e
= GEPOperands
.size(); i
!= e
; ++i
)
421 if (const Constant
*C
= dyn_cast
<Constant
>(GEPOperands
[i
])) {
422 if (!C
->isNullValue()) {
423 ConstantFound
= true;
424 AllZerosFound
= false;
428 AllZerosFound
= false;
431 // If we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2 must aliases
432 // the ptr, the end result is a must alias also.
437 if (V2Size
<= 1 && V1Size
<= 1) // Just pointer check?
440 // Otherwise we have to check to see that the distance is more than
441 // the size of the argument... build an index vector that is equal to
442 // the arguments provided, except substitute 0's for any variable
443 // indexes we find...
444 if (cast
<PointerType
>(
445 BasePtr
->getType())->getElementType()->isSized()) {
446 for (unsigned i
= 0; i
!= GEPOperands
.size(); ++i
)
447 if (!isa
<ConstantInt
>(GEPOperands
[i
]))
449 Constant::getNullValue(GEPOperands
[i
]->getType());
451 getTargetData().getIndexedOffset(BasePtr
->getType(),
455 if (Offset
>= (int64_t)V2Size
|| Offset
<= -(int64_t)V1Size
)
465 // This function is used to determin if the indices of two GEP instructions are
466 // equal. V1 and V2 are the indices.
467 static bool IndexOperandsEqual(Value
*V1
, Value
*V2
) {
468 if (V1
->getType() == V2
->getType())
470 if (Constant
*C1
= dyn_cast
<Constant
>(V1
))
471 if (Constant
*C2
= dyn_cast
<Constant
>(V2
)) {
472 // Sign extend the constants to long types, if necessary
473 if (C1
->getType() != Type::Int64Ty
)
474 C1
= ConstantExpr::getSExt(C1
, Type::Int64Ty
);
475 if (C2
->getType() != Type::Int64Ty
)
476 C2
= ConstantExpr::getSExt(C2
, Type::Int64Ty
);
482 /// CheckGEPInstructions - Check two GEP instructions with known must-aliasing
483 /// base pointers. This checks to see if the index expressions preclude the
484 /// pointers from aliasing...
485 AliasAnalysis::AliasResult
486 BasicAliasAnalysis::CheckGEPInstructions(
487 const Type
* BasePtr1Ty
, Value
**GEP1Ops
, unsigned NumGEP1Ops
, unsigned G1S
,
488 const Type
*BasePtr2Ty
, Value
**GEP2Ops
, unsigned NumGEP2Ops
, unsigned G2S
) {
489 // We currently can't handle the case when the base pointers have different
490 // primitive types. Since this is uncommon anyway, we are happy being
491 // extremely conservative.
492 if (BasePtr1Ty
!= BasePtr2Ty
)
495 const PointerType
*GEPPointerTy
= cast
<PointerType
>(BasePtr1Ty
);
497 // Find the (possibly empty) initial sequence of equal values... which are not
498 // necessarily constants.
499 unsigned NumGEP1Operands
= NumGEP1Ops
, NumGEP2Operands
= NumGEP2Ops
;
500 unsigned MinOperands
= std::min(NumGEP1Operands
, NumGEP2Operands
);
501 unsigned MaxOperands
= std::max(NumGEP1Operands
, NumGEP2Operands
);
502 unsigned UnequalOper
= 0;
503 while (UnequalOper
!= MinOperands
&&
504 IndexOperandsEqual(GEP1Ops
[UnequalOper
], GEP2Ops
[UnequalOper
])) {
505 // Advance through the type as we go...
507 if (const CompositeType
*CT
= dyn_cast
<CompositeType
>(BasePtr1Ty
))
508 BasePtr1Ty
= CT
->getTypeAtIndex(GEP1Ops
[UnequalOper
-1]);
510 // If all operands equal each other, then the derived pointers must
511 // alias each other...
513 assert(UnequalOper
== NumGEP1Operands
&& UnequalOper
== NumGEP2Operands
&&
514 "Ran out of type nesting, but not out of operands?");
519 // If we have seen all constant operands, and run out of indexes on one of the
520 // getelementptrs, check to see if the tail of the leftover one is all zeros.
521 // If so, return mustalias.
522 if (UnequalOper
== MinOperands
) {
523 if (NumGEP1Ops
< NumGEP2Ops
) {
524 std::swap(GEP1Ops
, GEP2Ops
);
525 std::swap(NumGEP1Ops
, NumGEP2Ops
);
528 bool AllAreZeros
= true;
529 for (unsigned i
= UnequalOper
; i
!= MaxOperands
; ++i
)
530 if (!isa
<Constant
>(GEP1Ops
[i
]) ||
531 !cast
<Constant
>(GEP1Ops
[i
])->isNullValue()) {
535 if (AllAreZeros
) return MustAlias
;
539 // So now we know that the indexes derived from the base pointers,
540 // which are known to alias, are different. We can still determine a
541 // no-alias result if there are differing constant pairs in the index
542 // chain. For example:
543 // A[i][0] != A[j][1] iff (&A[0][1]-&A[0][0] >= std::max(G1S, G2S))
545 // We have to be careful here about array accesses. In particular, consider:
546 // A[1][0] vs A[0][i]
547 // In this case, we don't *know* that the array will be accessed in bounds:
548 // the index could even be negative. Because of this, we have to
549 // conservatively *give up* and return may alias. We disregard differing
550 // array subscripts that are followed by a variable index without going
553 unsigned SizeMax
= std::max(G1S
, G2S
);
554 if (SizeMax
== ~0U) return MayAlias
; // Avoid frivolous work.
556 // Scan for the first operand that is constant and unequal in the
557 // two getelementptrs...
558 unsigned FirstConstantOper
= UnequalOper
;
559 for (; FirstConstantOper
!= MinOperands
; ++FirstConstantOper
) {
560 const Value
*G1Oper
= GEP1Ops
[FirstConstantOper
];
561 const Value
*G2Oper
= GEP2Ops
[FirstConstantOper
];
563 if (G1Oper
!= G2Oper
) // Found non-equal constant indexes...
564 if (Constant
*G1OC
= dyn_cast
<ConstantInt
>(const_cast<Value
*>(G1Oper
)))
565 if (Constant
*G2OC
= dyn_cast
<ConstantInt
>(const_cast<Value
*>(G2Oper
))){
566 if (G1OC
->getType() != G2OC
->getType()) {
567 // Sign extend both operands to long.
568 if (G1OC
->getType() != Type::Int64Ty
)
569 G1OC
= ConstantExpr::getSExt(G1OC
, Type::Int64Ty
);
570 if (G2OC
->getType() != Type::Int64Ty
)
571 G2OC
= ConstantExpr::getSExt(G2OC
, Type::Int64Ty
);
572 GEP1Ops
[FirstConstantOper
] = G1OC
;
573 GEP2Ops
[FirstConstantOper
] = G2OC
;
577 // Handle the "be careful" case above: if this is an array/packed
578 // subscript, scan for a subsequent variable array index.
579 if (isa
<SequentialType
>(BasePtr1Ty
)) {
581 cast
<SequentialType
>(BasePtr1Ty
)->getElementType();
582 bool isBadCase
= false;
584 for (unsigned Idx
= FirstConstantOper
+1;
585 Idx
!= MinOperands
&& isa
<SequentialType
>(NextTy
); ++Idx
) {
586 const Value
*V1
= GEP1Ops
[Idx
], *V2
= GEP2Ops
[Idx
];
587 if (!isa
<Constant
>(V1
) || !isa
<Constant
>(V2
)) {
591 NextTy
= cast
<SequentialType
>(NextTy
)->getElementType();
594 if (isBadCase
) G1OC
= 0;
597 // Make sure they are comparable (ie, not constant expressions), and
598 // make sure the GEP with the smaller leading constant is GEP1.
600 Constant
*Compare
= ConstantExpr::getICmp(ICmpInst::ICMP_SGT
,
602 if (ConstantInt
*CV
= dyn_cast
<ConstantInt
>(Compare
)) {
603 if (CV
->getZExtValue()) { // If they are comparable and G2 > G1
604 std::swap(GEP1Ops
, GEP2Ops
); // Make GEP1 < GEP2
605 std::swap(NumGEP1Ops
, NumGEP2Ops
);
612 BasePtr1Ty
= cast
<CompositeType
>(BasePtr1Ty
)->getTypeAtIndex(G1Oper
);
615 // No shared constant operands, and we ran out of common operands. At this
616 // point, the GEP instructions have run through all of their operands, and we
617 // haven't found evidence that there are any deltas between the GEP's.
618 // However, one GEP may have more operands than the other. If this is the
619 // case, there may still be hope. Check this now.
620 if (FirstConstantOper
== MinOperands
) {
621 // Make GEP1Ops be the longer one if there is a longer one.
622 if (NumGEP1Ops
< NumGEP2Ops
) {
623 std::swap(GEP1Ops
, GEP2Ops
);
624 std::swap(NumGEP1Ops
, NumGEP2Ops
);
627 // Is there anything to check?
628 if (NumGEP1Ops
> MinOperands
) {
629 for (unsigned i
= FirstConstantOper
; i
!= MaxOperands
; ++i
)
630 if (isa
<ConstantInt
>(GEP1Ops
[i
]) &&
631 !cast
<ConstantInt
>(GEP1Ops
[i
])->isZero()) {
632 // Yup, there's a constant in the tail. Set all variables to
633 // constants in the GEP instruction to make it suiteable for
634 // TargetData::getIndexedOffset.
635 for (i
= 0; i
!= MaxOperands
; ++i
)
636 if (!isa
<ConstantInt
>(GEP1Ops
[i
]))
637 GEP1Ops
[i
] = Constant::getNullValue(GEP1Ops
[i
]->getType());
638 // Okay, now get the offset. This is the relative offset for the full
640 const TargetData
&TD
= getTargetData();
641 int64_t Offset1
= TD
.getIndexedOffset(GEPPointerTy
, GEP1Ops
,
644 // Now check without any constants at the end.
645 int64_t Offset2
= TD
.getIndexedOffset(GEPPointerTy
, GEP1Ops
,
648 // If the tail provided a bit enough offset, return noalias!
649 if ((uint64_t)(Offset2
-Offset1
) >= SizeMax
)
654 // Couldn't find anything useful.
658 // If there are non-equal constants arguments, then we can figure
659 // out a minimum known delta between the two index expressions... at
660 // this point we know that the first constant index of GEP1 is less
661 // than the first constant index of GEP2.
663 // Advance BasePtr[12]Ty over this first differing constant operand.
664 BasePtr2Ty
= cast
<CompositeType
>(BasePtr1Ty
)->
665 getTypeAtIndex(GEP2Ops
[FirstConstantOper
]);
666 BasePtr1Ty
= cast
<CompositeType
>(BasePtr1Ty
)->
667 getTypeAtIndex(GEP1Ops
[FirstConstantOper
]);
669 // We are going to be using TargetData::getIndexedOffset to determine the
670 // offset that each of the GEP's is reaching. To do this, we have to convert
671 // all variable references to constant references. To do this, we convert the
672 // initial sequence of array subscripts into constant zeros to start with.
673 const Type
*ZeroIdxTy
= GEPPointerTy
;
674 for (unsigned i
= 0; i
!= FirstConstantOper
; ++i
) {
675 if (!isa
<StructType
>(ZeroIdxTy
))
676 GEP1Ops
[i
] = GEP2Ops
[i
] = Constant::getNullValue(Type::Int32Ty
);
678 if (const CompositeType
*CT
= dyn_cast
<CompositeType
>(ZeroIdxTy
))
679 ZeroIdxTy
= CT
->getTypeAtIndex(GEP1Ops
[i
]);
682 // We know that GEP1Ops[FirstConstantOper] & GEP2Ops[FirstConstantOper] are ok
684 // Loop over the rest of the operands...
685 for (unsigned i
= FirstConstantOper
+1; i
!= MaxOperands
; ++i
) {
686 const Value
*Op1
= i
< NumGEP1Ops
? GEP1Ops
[i
] : 0;
687 const Value
*Op2
= i
< NumGEP2Ops
? GEP2Ops
[i
] : 0;
688 // If they are equal, use a zero index...
689 if (Op1
== Op2
&& BasePtr1Ty
== BasePtr2Ty
) {
690 if (!isa
<ConstantInt
>(Op1
))
691 GEP1Ops
[i
] = GEP2Ops
[i
] = Constant::getNullValue(Op1
->getType());
692 // Otherwise, just keep the constants we have.
695 if (const ConstantInt
*Op1C
= dyn_cast
<ConstantInt
>(Op1
)) {
696 // If this is an array index, make sure the array element is in range.
697 if (const ArrayType
*AT
= dyn_cast
<ArrayType
>(BasePtr1Ty
)) {
698 if (Op1C
->getZExtValue() >= AT
->getNumElements())
699 return MayAlias
; // Be conservative with out-of-range accesses
700 } else if (const VectorType
*PT
= dyn_cast
<VectorType
>(BasePtr1Ty
)) {
701 if (Op1C
->getZExtValue() >= PT
->getNumElements())
702 return MayAlias
; // Be conservative with out-of-range accesses
706 // GEP1 is known to produce a value less than GEP2. To be
707 // conservatively correct, we must assume the largest possible
708 // constant is used in this position. This cannot be the initial
709 // index to the GEP instructions (because we know we have at least one
710 // element before this one with the different constant arguments), so
711 // we know that the current index must be into either a struct or
712 // array. Because we know it's not constant, this cannot be a
713 // structure index. Because of this, we can calculate the maximum
716 if (const ArrayType
*AT
= dyn_cast
<ArrayType
>(BasePtr1Ty
))
717 GEP1Ops
[i
] = ConstantInt::get(Type::Int64Ty
,AT
->getNumElements()-1);
718 else if (const VectorType
*PT
= dyn_cast
<VectorType
>(BasePtr1Ty
))
719 GEP1Ops
[i
] = ConstantInt::get(Type::Int64Ty
,PT
->getNumElements()-1);
725 if (const ConstantInt
*Op2C
= dyn_cast
<ConstantInt
>(Op2
)) {
726 // If this is an array index, make sure the array element is in range.
727 if (const ArrayType
*AT
= dyn_cast
<ArrayType
>(BasePtr1Ty
)) {
728 if (Op2C
->getZExtValue() >= AT
->getNumElements())
729 return MayAlias
; // Be conservative with out-of-range accesses
730 } else if (const VectorType
*PT
= dyn_cast
<VectorType
>(BasePtr1Ty
)) {
731 if (Op2C
->getZExtValue() >= PT
->getNumElements())
732 return MayAlias
; // Be conservative with out-of-range accesses
734 } else { // Conservatively assume the minimum value for this index
735 GEP2Ops
[i
] = Constant::getNullValue(Op2
->getType());
740 if (BasePtr1Ty
&& Op1
) {
741 if (const CompositeType
*CT
= dyn_cast
<CompositeType
>(BasePtr1Ty
))
742 BasePtr1Ty
= CT
->getTypeAtIndex(GEP1Ops
[i
]);
747 if (BasePtr2Ty
&& Op2
) {
748 if (const CompositeType
*CT
= dyn_cast
<CompositeType
>(BasePtr2Ty
))
749 BasePtr2Ty
= CT
->getTypeAtIndex(GEP2Ops
[i
]);
755 if (GEPPointerTy
->getElementType()->isSized()) {
757 getTargetData().getIndexedOffset(GEPPointerTy
, GEP1Ops
, NumGEP1Ops
);
759 getTargetData().getIndexedOffset(GEPPointerTy
, GEP2Ops
, NumGEP2Ops
);
760 assert(Offset1
<Offset2
&& "There is at least one different constant here!");
762 if ((uint64_t)(Offset2
-Offset1
) >= SizeMax
) {
763 //cerr << "Determined that these two GEP's don't alias ["
764 // << SizeMax << " bytes]: \n" << *GEP1 << *GEP2;
772 struct VISIBILITY_HIDDEN StringCompare
{
773 bool operator()(const char *LHS
, const char *RHS
) {
774 return strcmp(LHS
, RHS
) < 0;
// Note that this list cannot contain libm functions (such as acos and sqrt)
// that set errno on a domain or other error.
static const char *DoesntAccessMemoryFns[] = {
  "abs", "labs", "llabs", "imaxabs", "fabs", "fabsf", "fabsl",
  "trunc", "truncf", "truncl", "ldexp",

  "atan", "atanf", "atanl",   "atan2", "atan2f", "atan2l",
  "cbrt",
  "cos", "cosf", "cosl",
  "exp", "expf", "expl",
  "hypot",
  "sin", "sinf", "sinl",
  "tan", "tanf", "tanl",      "tanh", "tanhf", "tanhl",

  "floor", "floorf", "floorl", "ceil", "ceilf", "ceill",

  // ctype.h
  // NOTE: the original table was missing the comma after "isprint", which
  // silently concatenated it with "ispunct" into the single useless entry
  // "isprintispunct".  Fixed here so both names are recognized.
  "isalnum", "isalpha", "iscntrl", "isdigit", "isgraph", "islower", "isprint",
  "ispunct", "isspace", "isupper", "isxdigit", "tolower", "toupper",

  // wctype.h
  "iswalnum", "iswalpha", "iswcntrl", "iswdigit", "iswgraph", "iswlower",
  "iswprint", "iswpunct", "iswspace", "iswupper", "iswxdigit",

  "iswctype", "towctrans", "towlower", "towupper",

  "btowc", "wctob",

  "isinf", "isnan", "finite",

  // C99 math functions
  "copysign", "copysignf", "copysignd",
  "nexttoward", "nexttowardf", "nexttowardd",
  "nextafter", "nextafterf", "nextafterd",

  // glibc functions:
  "__fpclassify", "__fpclassifyf", "__fpclassifyl",
  "__signbit", "__signbitf", "__signbitl",
};
// Functions that only read their pointer arguments / global memory and have
// no side effects beyond their return value.
static const char *OnlyReadsMemoryFns[] = {
  "atoi", "atol", "atof", "atoll", "atoq", "a64l",
  "bcmp", "memcmp", "memchr", "memrchr", "wmemcmp", "wmemchr",

  // Strings
  "strcmp", "strcasecmp", "strcoll", "strncmp", "strncasecmp",
  "strchr", "strcspn", "strlen", "strpbrk", "strrchr", "strspn", "strstr",
  "index", "rindex",

  // Wide char strings
  "wcschr", "wcscmp", "wcscoll", "wcscspn", "wcslen", "wcsncmp", "wcspbrk",
  "wcsrchr", "wcsspn", "wcsstr",

  // glibc
  "alphasort", "alphasort64", "versionsort", "versionsort64",

  // C99
  "nan", "nanf", "nand",

  // File I/O
  "feof", "ferror", "fileno",
  "feof_unlocked", "ferror_unlocked", "fileno_unlocked"
};
// Lazily-populated, sorted copies of the tables above (plus intrinsic names
// spliced in from Intrinsics.gen); filled and sorted on first use inside
// getModRefBehavior below.
static ManagedStatic<std::vector<const char*> > NoMemoryTable;
static ManagedStatic<std::vector<const char*> > OnlyReadsMemoryTable;
847 AliasAnalysis::ModRefBehavior
848 BasicAliasAnalysis::getModRefBehavior(Function
*F
, CallSite CS
,
849 std::vector
<PointerAccessInfo
> *Info
) {
850 if (!F
->isDeclaration()) return UnknownModRefBehavior
;
852 static bool Initialized
= false;
854 NoMemoryTable
->insert(NoMemoryTable
->end(),
855 DoesntAccessMemoryFns
,
856 DoesntAccessMemoryFns
+
857 sizeof(DoesntAccessMemoryFns
)/sizeof(DoesntAccessMemoryFns
[0]));
859 OnlyReadsMemoryTable
->insert(OnlyReadsMemoryTable
->end(),
862 sizeof(OnlyReadsMemoryFns
)/sizeof(OnlyReadsMemoryFns
[0]));
863 #define GET_MODREF_BEHAVIOR
864 #include "llvm/Intrinsics.gen"
865 #undef GET_MODREF_BEHAVIOR
867 // Sort the table the first time through.
868 std::sort(NoMemoryTable
->begin(), NoMemoryTable
->end(), StringCompare());
869 std::sort(OnlyReadsMemoryTable
->begin(), OnlyReadsMemoryTable
->end(),
874 std::vector
<const char*>::iterator Ptr
=
875 std::lower_bound(NoMemoryTable
->begin(), NoMemoryTable
->end(),
876 F
->getName().c_str(), StringCompare());
877 if (Ptr
!= NoMemoryTable
->end() && *Ptr
== F
->getName())
878 return DoesNotAccessMemory
;
880 Ptr
= std::lower_bound(OnlyReadsMemoryTable
->begin(),
881 OnlyReadsMemoryTable
->end(),
882 F
->getName().c_str(), StringCompare());
883 if (Ptr
!= OnlyReadsMemoryTable
->end() && *Ptr
== F
->getName())
884 return OnlyReadsMemory
;
886 return UnknownModRefBehavior
;
// Make sure that anything that uses AliasAnalysis pulls in this file...
// (link-time anchor so the default implementation isn't dead-stripped).
DEFINING_FILE_FOR(BasicAliasAnalysis)