//===- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation ==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic AliasAnalysis interface, which is used as
// the common interface by all clients and implementations of alias analysis.
//
// This file also implements the default version of the AliasAnalysis interface
// that is to be used when no other implementation is specified. This does some
// simple tests that detect obvious cases: two different global pointers cannot
// alias, a global cannot alias a malloc, two different mallocs cannot alias,
// etc.
//
// This alias analysis implementation really isn't very good for anything, but
// it is very fast, and makes a nice clean default implementation. Because it
// handles lots of little corner cases, other, more complex, alias analysis
// implementations may choose to rely on this pass to resolve these simple and
// easy cases.
//
//===----------------------------------------------------------------------===//
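
// A minimal sketch of how a client typically drives this interface, assuming
// `AA` is the AAResults aggregation for the current function and `LI`/`SI`
// are a load and a store under analysis:
//
//   MemoryLocation LoadLoc = MemoryLocation::get(&LI);
//   MemoryLocation StoreLoc = MemoryLocation::get(&SI);
//   if (AA.alias(LoadLoc, StoreLoc) == AliasResult::NoAlias) {
//     // The accesses are provably disjoint; reordering them is safe from the
//     // aliasing point of view.
//   }
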
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/BasicAliasAnalysis.h"
29 #include "llvm/Analysis/CaptureTracking.h"
30 #include "llvm/Analysis/GlobalsModRef.h"
31 #include "llvm/Analysis/MemoryLocation.h"
32 #include "llvm/Analysis/ObjCARCAliasAnalysis.h"
33 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
34 #include "llvm/Analysis/ScopedNoAliasAA.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/IR/Argument.h"
39 #include "llvm/IR/Attributes.h"
40 #include "llvm/IR/BasicBlock.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/Value.h"
45 #include "llvm/InitializePasses.h"
46 #include "llvm/Pass.h"
47 #include "llvm/Support/AtomicOrdering.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
55 #define DEBUG_TYPE "aa"

STATISTIC(NumNoAlias, "Number of NoAlias results");
STATISTIC(NumMayAlias, "Number of MayAlias results");
STATISTIC(NumMustAlias, "Number of MustAlias results");

/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation.
cl::opt<bool> DisableBasicAA("disable-basic-aa", cl::Hidden, cl::init(false));

#ifndef NDEBUG
/// Print a trace of alias analysis queries and their results.
static cl::opt<bool> EnableAATrace("aa-trace", cl::Hidden, cl::init(false));
#else
static const bool EnableAATrace = false;
#endif

AAResults::AAResults(AAResults &&Arg)
    : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {}

AAResults::~AAResults() {}

bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
                           FunctionAnalysisManager::Invalidator &Inv) {
  // AAResults preserves the AAManager by default, due to the stateless nature
  // of AliasAnalysis. There is no need to check whether it has been preserved
  // explicitly. Check if any module dependency was invalidated and caused the
  // AAManager to be invalidated. Invalidate ourselves in that case.
  auto PAC = PA.getChecker<AAManager>();
  if (!PAC.preservedWhenStateless())
    return true;

  // Check if any of the function dependencies were invalidated, and invalidate
  // ourselves in that case.
  for (AnalysisKey *ID : AADeps)
    if (Inv.invalidate(ID, F, PA))
      return true;

  // Everything we depend on is still fine, so are we. Nothing to invalidate.
  return false;
}

//===----------------------------------------------------------------------===//
// Default chaining methods
//===----------------------------------------------------------------------===//

AliasResult AAResults::alias(const MemoryLocation &LocA,
                             const MemoryLocation &LocB) {
  SimpleAAQueryInfo AAQIP(*this);
  return alias(LocA, LocB, AAQIP, nullptr);
}

AliasResult AAResults::alias(const MemoryLocation &LocA,
                             const MemoryLocation &LocB, AAQueryInfo &AAQI,
                             const Instruction *CtxI) {
  AliasResult Result = AliasResult::MayAlias;

  if (EnableAATrace) {
    for (unsigned I = 0; I < AAQI.Depth; ++I)
      dbgs() << "  ";
    dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", "
           << *LocB.Ptr << " @ " << LocB.Size << "\n";
  }

  AAQI.Depth++;
  for (const auto &AA : AAs) {
    Result = AA->alias(LocA, LocB, AAQI, CtxI);
    if (Result != AliasResult::MayAlias)
      break;
  }
  AAQI.Depth--;

  if (EnableAATrace) {
    for (unsigned I = 0; I < AAQI.Depth; ++I)
      dbgs() << "  ";
    dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", "
           << *LocB.Ptr << " @ " << LocB.Size << " = " << Result << "\n";
  }

  if (AAQI.Depth == 0) {
    if (Result == AliasResult::NoAlias)
      ++NumNoAlias;
    else if (Result == AliasResult::MustAlias)
      ++NumMustAlias;
    else
      ++NumMayAlias;
  }
  return Result;
}
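
// The chaining above stops at the first definitive answer. A sketch of what
// that means for a client, assuming `AA` aggregates several implementations
// (e.g. BasicAA and TBAA) and `P`/`Q` are pointer Values accessed with a
// fixed 4-byte size:
//
//   AliasResult R = AA.alias(MemoryLocation(P, LocationSize::precise(4)),
//                            MemoryLocation(Q, LocationSize::precise(4)));
//   // R becomes NoAlias/MustAlias/PartialAlias as soon as any registered
//   // implementation can prove it; only if every implementation answers
//   // MayAlias does the aggregate result stay MayAlias.
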

ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
                                        bool IgnoreLocals) {
  SimpleAAQueryInfo AAQIP(*this);
  return getModRefInfoMask(Loc, AAQIP, IgnoreLocals);
}

ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI, bool IgnoreLocals) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfoMask(Loc, AAQI, IgnoreLocals);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  return Result;
}

ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getArgModRefInfo(Call, ArgIdx);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  return Result;
}

ModRefInfo AAResults::getModRefInfo(const Instruction *I,
                                    const CallBase *Call2) {
  SimpleAAQueryInfo AAQIP(*this);
  return getModRefInfo(I, Call2, AAQIP);
}

ModRefInfo AAResults::getModRefInfo(const Instruction *I, const CallBase *Call2,
                                    AAQueryInfo &AAQI) {
  // We may have two calls.
  if (const auto *Call1 = dyn_cast<CallBase>(I)) {
    // Check if the two calls modify the same memory.
    return getModRefInfo(Call1, Call2, AAQI);
  }
  // If this is a fence, just return ModRef.
  if (I->isFenceLike())
    return ModRefInfo::ModRef;
  // Otherwise, check if the call modifies or references the
  // location this memory access defines. The best we can say
  // is that if the call references what this instruction
  // defines, it must be clobbered by this location.
  const MemoryLocation DefLoc = MemoryLocation::get(I);
  ModRefInfo MR = getModRefInfo(Call2, DefLoc, AAQI);
  if (isModOrRefSet(MR))
    return ModRefInfo::ModRef;
  return ModRefInfo::NoModRef;
}

ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfo(Call, Loc, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // We can completely ignore inaccessible memory here, because MemoryLocations
  // can only reference accessible memory.
  auto ME = getMemoryEffects(Call, AAQI)
                .getWithoutLoc(IRMemLocation::InaccessibleMem);
  if (ME.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  ModRefInfo ArgMR = ME.getModRef(IRMemLocation::ArgMem);
  ModRefInfo OtherMR = ME.getWithoutLoc(IRMemLocation::ArgMem).getModRef();
  if ((ArgMR | OtherMR) != OtherMR) {
    // Refine the modref info for argument memory. We only bother to do this
    // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact
    // on the final result.
    ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
    for (const auto &I : llvm::enumerate(Call->args())) {
      const Value *Arg = I.value();
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned ArgIdx = I.index();
      MemoryLocation ArgLoc = MemoryLocation::getForArgument(Call, ArgIdx, TLI);
      AliasResult ArgAlias = alias(ArgLoc, Loc, AAQI, Call);
      if (ArgAlias != AliasResult::NoAlias)
        AllArgsMask |= getArgModRefInfo(Call, ArgIdx);
    }
    ArgMR &= AllArgsMask;
  }

  Result &= ArgMR | OtherMR;

  // Apply the ModRef mask. This ensures that if Loc is a constant memory
  // location, we take into account the fact that the call definitely could not
  // modify the memory location.
  if (!isNoModRef(Result))
    Result &= getModRefInfoMask(Loc);

  return Result;
}
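
// A sketch of the effect of the refinement above, assuming `Call` is a call
// known (e.g. via attributes) to access only argument-pointee memory and
// `Loc` describes an alloca that is passed to it only through a readonly
// pointer argument:
//
//   ModRefInfo MRI = AA.getModRefInfo(Call, Loc);
//   // The per-argument walk proves that the only argument aliasing Loc is
//   // readonly, so ArgMR collapses to Ref and MRI comes back as at most Ref
//   // instead of the conservative ModRef.
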

ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
                                    const CallBase *Call2, AAQueryInfo &AAQI) {
  ModRefInfo Result = ModRefInfo::ModRef;

  for (const auto &AA : AAs) {
    Result &= AA->getModRefInfo(Call1, Call2, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (isNoModRef(Result))
      return ModRefInfo::NoModRef;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If Call1 or Call2 are readnone, they don't interact.
  auto Call1B = getMemoryEffects(Call1, AAQI);
  if (Call1B.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  auto Call2B = getMemoryEffects(Call2, AAQI);
  if (Call2B.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  // If they both only read from memory, there is no dependence.
  if (Call1B.onlyReadsMemory() && Call2B.onlyReadsMemory())
    return ModRefInfo::NoModRef;

  // If Call1 only reads memory, the only dependence on Call2 can be
  // from Call1 reading memory written by Call2.
  if (Call1B.onlyReadsMemory())
    Result &= ModRefInfo::Ref;
  else if (Call1B.onlyWritesMemory())
    Result &= ModRefInfo::Mod;

  // If Call2 only accesses memory through arguments, accumulate the mod/ref
  // information from Call1's references to the memory referenced by
  // Call2's arguments.
  if (Call2B.onlyAccessesArgPointees()) {
    if (!Call2B.doesAccessArgPointees())
      return ModRefInfo::NoModRef;
    ModRefInfo R = ModRefInfo::NoModRef;
    for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
      const Value *Arg = *I;
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned Call2ArgIdx = std::distance(Call2->arg_begin(), I);
      auto Call2ArgLoc =
          MemoryLocation::getForArgument(Call2, Call2ArgIdx, TLI);

      // ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the
      // dependence of Call1 on that location is the inverse:
      // - If Call2 modifies location, dependence exists if Call1 reads or
      //   writes.
      // - If Call2 only reads location, dependence exists if Call1 writes.
      ModRefInfo ArgModRefC2 = getArgModRefInfo(Call2, Call2ArgIdx);
      ModRefInfo ArgMask = ModRefInfo::NoModRef;
      if (isModSet(ArgModRefC2))
        ArgMask = ModRefInfo::ModRef;
      else if (isRefSet(ArgModRefC2))
        ArgMask = ModRefInfo::Mod;

      // ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use
      // above ArgMask to update dependence info.
      ArgMask &= getModRefInfo(Call1, Call2ArgLoc, AAQI);

      R = (R | ArgMask) & Result;
    }

    return R;
  }

  // If Call1 only accesses memory through arguments, check if Call2 references
  // any of the memory referenced by Call1's arguments. If not, return NoModRef.
  if (Call1B.onlyAccessesArgPointees()) {
    if (!Call1B.doesAccessArgPointees())
      return ModRefInfo::NoModRef;
    ModRefInfo R = ModRefInfo::NoModRef;
    for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
      const Value *Arg = *I;
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned Call1ArgIdx = std::distance(Call1->arg_begin(), I);
      auto Call1ArgLoc =
          MemoryLocation::getForArgument(Call1, Call1ArgIdx, TLI);

      // ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1
      // might Mod Call1ArgLoc, then we care about either a Mod or a Ref by
      // Call2. If Call1 might Ref, then we care only about a Mod by Call2.
      ModRefInfo ArgModRefC1 = getArgModRefInfo(Call1, Call1ArgIdx);
      ModRefInfo ModRefC2 = getModRefInfo(Call2, Call1ArgLoc, AAQI);
      if ((isModSet(ArgModRefC1) && isModOrRefSet(ModRefC2)) ||
          (isRefSet(ArgModRefC1) && isModSet(ModRefC2)))
        R = (R | ArgModRefC1) & Result;
    }

    return R;
  }

  return Result;
}

MemoryEffects AAResults::getMemoryEffects(const CallBase *Call,
                                          AAQueryInfo &AAQI) {
  MemoryEffects Result = MemoryEffects::unknown();

  for (const auto &AA : AAs) {
    Result &= AA->getMemoryEffects(Call, AAQI);

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result.doesNotAccessMemory())
      return Result;
  }

  return Result;
}

MemoryEffects AAResults::getMemoryEffects(const CallBase *Call) {
  SimpleAAQueryInfo AAQI(*this);
  return getMemoryEffects(Call, AAQI);
}

MemoryEffects AAResults::getMemoryEffects(const Function *F) {
  MemoryEffects Result = MemoryEffects::unknown();

  for (const auto &AA : AAs) {
    Result &= AA->getMemoryEffects(F);

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result.doesNotAccessMemory())
      return Result;
  }

  return Result;
}
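
// MemoryEffects summarizes a callee independently of any particular location.
// A sketch of the usual coarse checks a client performs on it, assuming `F`
// is the callee of interest:
//
//   MemoryEffects ME = AA.getMemoryEffects(&F);
//   if (ME.doesNotAccessMemory()) {
//     // readnone: calls to F can be reordered freely with memory operations.
//   } else if (ME.onlyReadsMemory()) {
//     // readonly: calls to F never clobber memory, but may depend on stores.
//   }
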

raw_ostream &llvm::operator<<(raw_ostream &OS, AliasResult AR) {
  switch (AR) {
  case AliasResult::NoAlias:
    OS << "NoAlias";
    break;
  case AliasResult::MustAlias:
    OS << "MustAlias";
    break;
  case AliasResult::MayAlias:
    OS << "MayAlias";
    break;
  case AliasResult::PartialAlias:
    OS << "PartialAlias";
    if (AR.hasOffset())
      OS << " (off " << AR.getOffset() << ")";
    break;
  }
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS, ModRefInfo MR) {
  switch (MR) {
  case ModRefInfo::NoModRef:
    OS << "NoModRef";
    break;
  case ModRefInfo::Ref:
    OS << "Ref";
    break;
  case ModRefInfo::Mod:
    OS << "Mod";
    break;
  case ModRefInfo::ModRef:
    OS << "ModRef";
    break;
  }
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS, MemoryEffects ME) {
  for (IRMemLocation Loc : MemoryEffects::locations()) {
    switch (Loc) {
    case IRMemLocation::ArgMem:
      OS << "ArgMem: ";
      break;
    case IRMemLocation::InaccessibleMem:
      OS << "InaccessibleMem: ";
      break;
    case IRMemLocation::Other:
      OS << "Other: ";
      break;
    }
    OS << ME.getModRef(Loc) << ", ";
  }
  return OS;
}

//===----------------------------------------------------------------------===//
// Helper method implementation
//===----------------------------------------------------------------------===//

ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Be conservative in the face of atomic.
  if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
    return ModRefInfo::ModRef;

  // If the load address doesn't alias the given address, it doesn't read
  // or write the specified memory.
  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(L), Loc, AAQI, L);
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }
  // Otherwise, a load just reads.
  return ModRefInfo::Ref;
}

ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Be conservative in the face of atomic.
  if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
    return ModRefInfo::ModRef;

  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(S), Loc, AAQI, S);
    // If the store address cannot alias the pointer in question, then the
    // specified memory cannot be modified by the store.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;

    // Examine the ModRef mask. If Mod isn't present, then return NoModRef.
    // This ensures that if Loc is a constant memory location, we take into
    // account the fact that the store definitely could not modify the memory
    // location.
    if (!isModSet(getModRefInfoMask(Loc)))
      return ModRefInfo::NoModRef;
  }

  // Otherwise, a store just writes.
  return ModRefInfo::Mod;
}
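
// A sketch of how the load/store handlers above are typically reached,
// assuming `SI` is a store and `Loc` is the location of some earlier load:
//
//   ModRefInfo MRI = AA.getModRefInfo(&SI, Loc);
//   if (!isModSet(MRI)) {
//     // The store provably does not modify Loc (either the addresses do not
//     // alias, or Loc is constant memory), so the earlier loaded value is
//     // still valid after the store.
//   }
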

ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // All we know about a fence instruction is what we get from the ModRef
  // mask: if Loc is a constant memory location, the fence definitely could
  // not modify it.
  if (Loc.Ptr)
    return getModRefInfoMask(Loc);
  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(V), Loc, AAQI, V);
    // If the va_arg address cannot alias the pointer in question, then the
    // specified memory cannot be accessed by the va_arg.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;

    // If the pointer is a pointer to invariant memory, then it could not have
    // been modified by this va_arg.
    return getModRefInfoMask(Loc, AAQI);
  }

  // Otherwise, a va_arg reads and writes.
  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to invariant memory,
    // then it could not have been modified by this catchpad.
    return getModRefInfoMask(Loc, AAQI);
  }

  // Otherwise, a catchpad reads and writes.
  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to invariant memory,
    // then it could not have been modified by this catchret.
    return getModRefInfoMask(Loc, AAQI);
  }

  // Otherwise, a catchret reads and writes.
  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
  if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
    return ModRefInfo::ModRef;

  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(CX), Loc, AAQI, CX);
    // If the cmpxchg address does not alias the location, it does not access
    // it.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
                                    const MemoryLocation &Loc,
                                    AAQueryInfo &AAQI) {
  // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
  if (isStrongerThanMonotonic(RMW->getOrdering()))
    return ModRefInfo::ModRef;

  if (Loc.Ptr) {
    AliasResult AR = alias(MemoryLocation::get(RMW), Loc, AAQI, RMW);
    // If the atomicrmw address does not alias the location, it does not access
    // it.
    if (AR == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  return ModRefInfo::ModRef;
}

ModRefInfo AAResults::getModRefInfo(const Instruction *I,
                                    const std::optional<MemoryLocation> &OptLoc,
                                    AAQueryInfo &AAQIP) {
  if (OptLoc == std::nullopt) {
    if (const auto *Call = dyn_cast<CallBase>(I))
      return getMemoryEffects(Call, AAQIP).getModRef();
  }

  const MemoryLocation &Loc = OptLoc.value_or(MemoryLocation());

  switch (I->getOpcode()) {
  case Instruction::VAArg:
    return getModRefInfo((const VAArgInst *)I, Loc, AAQIP);
  case Instruction::Load:
    return getModRefInfo((const LoadInst *)I, Loc, AAQIP);
  case Instruction::Store:
    return getModRefInfo((const StoreInst *)I, Loc, AAQIP);
  case Instruction::Fence:
    return getModRefInfo((const FenceInst *)I, Loc, AAQIP);
  case Instruction::AtomicCmpXchg:
    return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP);
  case Instruction::AtomicRMW:
    return getModRefInfo((const AtomicRMWInst *)I, Loc, AAQIP);
  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke:
    return getModRefInfo((const CallBase *)I, Loc, AAQIP);
  case Instruction::CatchPad:
    return getModRefInfo((const CatchPadInst *)I, Loc, AAQIP);
  case Instruction::CatchRet:
    return getModRefInfo((const CatchReturnInst *)I, Loc, AAQIP);
  default:
    assert(!I->mayReadOrWriteMemory() &&
           "Unhandled memory access instruction!");
    return ModRefInfo::NoModRef;
  }
}

/// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock.
///
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         AAQueryInfo &AAQI) {
  if (!DT)
    return ModRefInfo::ModRef;

  const Value *Object = getUnderlyingObject(MemLoc.Ptr);
  if (!isIdentifiedFunctionLocal(Object))
    return ModRefInfo::ModRef;

  const auto *Call = dyn_cast<CallBase>(I);
  if (!Call || Call == Object)
    return ModRefInfo::ModRef;

  if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                 /* StoreCaptures */ true, I, DT,
                                 /* include Object */ true))
    return ModRefInfo::ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = ModRefInfo::NoModRef;
  // Set flag only if no May found and all operands processed.
  for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments. If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!Call->doesNotCapture(ArgNo) && ArgNo < Call->arg_size() &&
         !Call->isByValArgument(ArgNo)))
      continue;

    AliasResult AR =
        alias(MemoryLocation::getBeforeOrAfter(*CI),
              MemoryLocation::getBeforeOrAfter(Object), AAQI, Call);
    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking. If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (AR == AliasResult::NoAlias)
      continue;
    if (Call->doesNotAccessMemory(ArgNo))
      continue;
    if (Call->onlyReadsMemory(ArgNo)) {
      R = ModRefInfo::Ref;
      continue;
    }
    return ModRefInfo::ModRef;
  }
  return R;
}
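
// A sketch of the intended use, assuming `I` is a call instruction, `MemLoc`
// describes a local alloca accessed around it, and `DT` is the function's
// dominator tree:
//
//   if (isNoModRef(AA.callCapturesBefore(&I, MemLoc, &DT))) {
//     // The alloca has not escaped before the call, so the call can neither
//     // read nor write it and memory-dependence queries may skip it.
//   }
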

/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
                                    const MemoryLocation &Loc) {
  return canInstructionRangeModRef(BB.front(), BB.back(), Loc, ModRefInfo::Mod);
}

/// canInstructionRangeModRef - Return true if it is possible for the
/// execution of the specified instructions to mod/ref (according to the
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
/// I1 and I2 must be in the same basic block.
bool AAResults::canInstructionRangeModRef(const Instruction &I1,
                                          const Instruction &I2,
                                          const MemoryLocation &Loc,
                                          const ModRefInfo Mode) {
  assert(I1.getParent() == I2.getParent() &&
         "Instructions not in same basic block!");
  BasicBlock::const_iterator I = I1.getIterator();
  BasicBlock::const_iterator E = I2.getIterator();
  ++E; // Convert from inclusive to exclusive range.

  for (; I != E; ++I) // Check every instruction in range
    if (isModOrRefSet(getModRefInfo(&*I, Loc) & Mode))
      return true;
  return false;
}
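
// A sketch of using these helpers, assuming `BB` is a block a pass would like
// to hoist a load out of and `Loc` is the load's location:
//
//   if (!AA.canBasicBlockModify(BB, Loc)) {
//     // No instruction in BB can write Loc, so the loaded value cannot
//     // change while control passes through BB.
//   }
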

// Provide a definition for the root virtual destructor.
AAResults::Concept::~Concept() = default;

// Provide a definition for the static object used to identify passes.
AnalysisKey AAManager::Key;

ExternalAAWrapperPass::ExternalAAWrapperPass() : ImmutablePass(ID) {
  initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

ExternalAAWrapperPass::ExternalAAWrapperPass(CallbackT CB)
    : ImmutablePass(ID), CB(std::move(CB)) {
  initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char ExternalAAWrapperPass::ID = 0;

INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)

ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}
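
// A sketch of how a target or plugin can hook its own alias analysis into the
// legacy-pass-manager aggregation, assuming `getOrCreateMyAAResult` is a
// hypothetical helper that owns an AAResultBase-derived result per function:
//
//   legacy::PassManager PM;
//   PM.add(createExternalAAWrapperPass(
//       [](Pass &P, Function &F, AAResults &AAR) {
//         AAR.addAAResult(getOrCreateMyAAResult(F)); // hypothetical helper
//       }));
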

AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
}

char AAResultsWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)

/// Run the wrapper pass to rebuild an aggregation over known AA passes.
///
/// This is the legacy pass manager's interface to the new-style AA results
/// aggregation object. Because this is somewhat shoe-horned into the legacy
/// pass manager, we hard code all the specific alias analyses available into
/// it. While the particular set enabled is configured via commandline flags,
/// adding a new alias analysis to LLVM will require adding support for it to
/// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(
      new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F)));

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA)
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());

  // Populate the results with the currently available AAs.
  if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());

  // If available, run an external AA providing callback over the results as
  // well.
  if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(*this, F, *AAR);

  // Analyses don't mutate the IR, so return false.
  return false;
}

void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<BasicAAWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<ExternalAAWrapperPass>();
}

AAManager::Result AAManager::run(Function &F, FunctionAnalysisManager &AM) {
  Result R(AM.getResult<TargetLibraryAnalysis>(F));
  for (auto &Getter : ResultGetters)
    (*Getter)(F, AM, R);
  return R;
}

bool llvm::isNoAliasCall(const Value *V) {
  if (const auto *Call = dyn_cast<CallBase>(V))
    return Call->hasRetAttr(Attribute::NoAlias);
  return false;
}

static bool isNoAliasOrByValArgument(const Value *V) {
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasNoAliasAttr() || A->hasByValAttr();
  return false;
}

bool llvm::isIdentifiedObject(const Value *V) {
  if (isa<AllocaInst>(V))
    return true;
  if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
    return true;
  if (isNoAliasCall(V))
    return true;
  if (isNoAliasOrByValArgument(V))
    return true;
  return false;
}

bool llvm::isIdentifiedFunctionLocal(const Value *V) {
  return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasOrByValArgument(V);
}

bool llvm::isEscapeSource(const Value *V) {
  if (auto *CB = dyn_cast<CallBase>(V))
    return !isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CB,
                                                                        true);

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  // The inttoptr case works because isNonEscapingLocalObject considers all
  // means of converting or equating a pointer to an int (ptrtoint, ptr store
  // which could be followed by an integer load, ptr<->int compare) as
  // escaping, and objects located at well-known addresses via platform-specific
  // means cannot be considered non-escaping local objects.
  if (isa<IntToPtrInst>(V))
    return true;

  // Same for inttoptr constant expressions.
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::IntToPtr)
      return true;

  return false;
}

bool llvm::isNotVisibleOnUnwind(const Value *Object,
                                bool &RequiresNoCaptureBeforeUnwind) {
  RequiresNoCaptureBeforeUnwind = false;

  // Alloca goes out of scope on unwind.
  if (isa<AllocaInst>(Object))
    return true;

  // Byval goes out of scope on unwind.
  if (auto *A = dyn_cast<Argument>(Object))
    return A->hasByValAttr() || A->hasAttribute(Attribute::DeadOnUnwind);

  // A noalias return is not accessible from any other code. If the pointer
  // does not escape prior to the unwind, then the caller cannot access the
  // memory.
  if (isNoAliasCall(Object)) {
    RequiresNoCaptureBeforeUnwind = true;
    return true;
  }

  return false;
}

// We don't consider globals as writable: While the physical memory is writable,
// we may not have provenance to perform the write.
bool llvm::isWritableObject(const Value *Object,
                            bool &ExplicitlyDereferenceableOnly) {
  ExplicitlyDereferenceableOnly = false;

  // TODO: Alloca might not be writable after its lifetime ends.
  // See https://github.com/llvm/llvm-project/issues/51838.
  if (isa<AllocaInst>(Object))
    return true;

  if (auto *A = dyn_cast<Argument>(Object)) {
    if (A->hasAttribute(Attribute::Writable)) {
      ExplicitlyDereferenceableOnly = true;
      return true;
    }

    return A->hasByValAttr();
  }

  // TODO: Noalias shouldn't imply writability, this should check for an
  // allocator function instead.
  return isNoAliasCall(Object);
}