//===-- CodeGenTBAA.cpp - TBAA information for LLVM CodeGen ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that manages TBAA information and defines the TBAA policy
// for the optimizer to use. Relevant standards text includes:
//
//   C99 6.5p7
//   C++ [basic.lval] (p10 in n3126, p15 in some earlier versions)
//
//===----------------------------------------------------------------------===//

17 #include "CodeGenTBAA.h"
18 #include "ABIInfoImpl.h"
20 #include "CGRecordLayout.h"
21 #include "CodeGenTypes.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/Attr.h"
24 #include "clang/AST/Mangle.h"
25 #include "clang/AST/RecordLayout.h"
26 #include "clang/Basic/CodeGenOptions.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "llvm/IR/LLVMContext.h"
29 #include "llvm/IR/Metadata.h"
30 #include "llvm/IR/Module.h"
31 #include "llvm/IR/Type.h"
32 #include "llvm/Support/Debug.h"
using namespace clang;
using namespace CodeGen;

CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, CodeGenTypes &CGTypes,
                         llvm::Module &M, const CodeGenOptions &CGO,
                         const LangOptions &Features)
    : Context(Ctx), CGTypes(CGTypes), Module(M), CodeGenOpts(CGO),
      Features(Features),
      MangleCtx(ItaniumMangleContext::create(Ctx, Ctx.getDiagnostics())),
      MDHelper(M.getContext()), Root(nullptr), Char(nullptr) {}

CodeGenTBAA::~CodeGenTBAA() {
}

llvm::MDNode *CodeGenTBAA::getRoot() {
  // Define the root of the tree. This identifies the tree, so that
  // if our LLVM IR is linked with LLVM IR from a different front-end
  // (or a different version of this front-end), their TBAA trees will
  // remain distinct, and the optimizer will treat them conservatively.
  if (!Root) {
    if (Features.CPlusPlus)
      Root = MDHelper.createTBAARoot("Simple C++ TBAA");
    else
      Root = MDHelper.createTBAARoot("Simple C/C++ TBAA");
  }

  return Root;
}
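
// For illustration: the root is emitted as a lone metadata string node,
// typically of the form
//   !{!"Simple C++ TBAA"}
// and every other TBAA type node ultimately chains back to it, which is what
// keeps trees from different front-ends distinct.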

llvm::MDNode *CodeGenTBAA::createScalarTypeNode(StringRef Name,
                                                llvm::MDNode *Parent,
                                                uint64_t Size) {
  if (CodeGenOpts.NewStructPathTBAA) {
    llvm::Metadata *Id = MDHelper.createString(Name);
    return MDHelper.createTBAATypeNode(Parent, Size, Id);
  }
  return MDHelper.createTBAAScalarTypeNode(Name, Parent);
}
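
// For illustration: in the classic format a scalar type node carries its name
// as operand 0, e.g.
//   !{!"int", !<char node>, i64 0}
// whereas the new struct-path format created above places the parent and size
// first and the name string at operand 2, which is why getTypeInfoHelper()
// below reads operand 2 or 0 depending on CodeGenOpts.NewStructPathTBAA.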

llvm::MDNode *CodeGenTBAA::getChar() {
  // Define the root of the tree for user-accessible memory. C and C++
  // give special powers to char and certain similar types. However,
  // these special powers only cover user-accessible memory, and don't
  // include things like vtables.
  if (!Char)
    Char = createScalarTypeNode("omnipotent char", getRoot(), /* Size= */ 1);

  return Char;
}
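
// For illustration: the node created above is the parent of every
// user-accessible type and typically renders as
//   !{!"omnipotent char", !<root>, i64 0}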

static bool TypeHasMayAlias(QualType QTy) {
  // Tagged types have declarations, and therefore may have attributes.
  if (auto *TD = QTy->getAsTagDecl())
    if (TD->hasAttr<MayAliasAttr>())
      return true;

  // Also look for may_alias as a declaration attribute on a typedef.
  // FIXME: We should follow GCC and model may_alias as a type attribute
  // rather than as a declaration attribute.
  while (auto *TT = QTy->getAs<TypedefType>()) {
    if (TT->getDecl()->hasAttr<MayAliasAttr>())
      return true;
    QTy = TT->desugar();
  }

  return false;
}

/// Check if the given type is a valid base type to be used in access tags.
static bool isValidBaseType(QualType QTy) {
  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    // Incomplete types are not valid base access types.
    if (!RD)
      return false;
    if (RD->hasFlexibleArrayMember())
      return false;
    // RD can be struct, union, class, interface or enum.
    // For now, we only handle struct and class.
    if (RD->isStruct() || RD->isClass())
      return true;
  }
  return false;
}

llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
  uint64_t Size = Context.getTypeSizeInChars(Ty).getQuantity();

  // Handle builtin types.
  if (const BuiltinType *BTy = dyn_cast<BuiltinType>(Ty)) {
    switch (BTy->getKind()) {
    // Character types are special and can alias anything.
    // In C++, this technically only includes "char" and "unsigned char",
    // and not "signed char". In C, it includes all three. For now,
    // the risk of exploiting this detail in C++ seems likely to outweigh
    // the benefit.
    case BuiltinType::Char_U:
    case BuiltinType::Char_S:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
      return getChar();

    // Unsigned types can alias their corresponding signed types.
    case BuiltinType::UShort:
      return getTypeInfo(Context.ShortTy);
    case BuiltinType::UInt:
      return getTypeInfo(Context.IntTy);
    case BuiltinType::ULong:
      return getTypeInfo(Context.LongTy);
    case BuiltinType::ULongLong:
      return getTypeInfo(Context.LongLongTy);
    case BuiltinType::UInt128:
      return getTypeInfo(Context.Int128Ty);

    case BuiltinType::UShortFract:
      return getTypeInfo(Context.ShortFractTy);
    case BuiltinType::UFract:
      return getTypeInfo(Context.FractTy);
    case BuiltinType::ULongFract:
      return getTypeInfo(Context.LongFractTy);

    case BuiltinType::SatUShortFract:
      return getTypeInfo(Context.SatShortFractTy);
    case BuiltinType::SatUFract:
      return getTypeInfo(Context.SatFractTy);
    case BuiltinType::SatULongFract:
      return getTypeInfo(Context.SatLongFractTy);

    case BuiltinType::UShortAccum:
      return getTypeInfo(Context.ShortAccumTy);
    case BuiltinType::UAccum:
      return getTypeInfo(Context.AccumTy);
    case BuiltinType::ULongAccum:
      return getTypeInfo(Context.LongAccumTy);

    case BuiltinType::SatUShortAccum:
      return getTypeInfo(Context.SatShortAccumTy);
    case BuiltinType::SatUAccum:
      return getTypeInfo(Context.SatAccumTy);
    case BuiltinType::SatULongAccum:
      return getTypeInfo(Context.SatLongAccumTy);

    // Treat all other builtin types as distinct types. This includes
    // treating wchar_t, char16_t, and char32_t as distinct from their
    // "underlying types".
    default:
      return createScalarTypeNode(BTy->getName(Features), getChar(), Size);
    }
  }

  // C++1z [basic.lval]p10: "If a program attempts to access the stored value of
  // an object through a glvalue of other than one of the following types the
  // behavior is undefined: [...] a char, unsigned char, or std::byte type."
  if (Ty->isStdByteType())
    return getChar();

  // Handle pointers and references.
  //
  // C has a very strict rule for pointer aliasing. C23 6.7.6.1p2:
  //   For two pointer types to be compatible, both shall be identically
  //   qualified and both shall be pointers to compatible types.
  //
  // This rule is impractically strict; we want to at least ignore CVR
  // qualifiers. Distinguishing by CVR qualifiers would make it UB to
  // e.g. cast a `char **` to `const char * const *` and dereference it,
  // which is too common and useful to invalidate. C++'s similar types
  // rule permits qualifier differences in these nested positions; in fact,
  // C++ even allows that cast as an implicit conversion.
  //
  // Other qualifiers could theoretically be distinguished, especially if
  // they involve a significant representation difference. We don't
  // currently do so, however.
  if (Ty->isPointerType() || Ty->isReferenceType()) {
    llvm::MDNode *AnyPtr = createScalarTypeNode("any pointer", getChar(), Size);
    if (!CodeGenOpts.PointerTBAA)
      return AnyPtr;
    // Compute the depth of the pointer and generate a tag of the form
    // "p<depth> <base type tag>".
    unsigned PtrDepth = 0;
    do {
      PtrDepth++;
      Ty = Ty->getPointeeType().getTypePtr();
    } while (Ty->isPointerType());
    Ty = Context.getBaseElementType(QualType(Ty, 0)).getTypePtr();
    assert(!isa<VariableArrayType>(Ty));
    // When the underlying type is a builtin type, we compute the pointee type
    // string recursively, which is implicitly more forgiving than the standards
    // require. Effectively, we are turning the question "are these types
    // compatible/similar" into "are accesses to these types allowed to alias".
    // In both C and C++, the latter question has special carve-outs for
    // signedness mismatches that only apply at the top level. As a result, we
    // are allowing e.g. `int *` l-values to access `unsigned *` objects.
    SmallString<256> TyName;
    if (isa<BuiltinType>(Ty)) {
      llvm::MDNode *ScalarMD = getTypeInfoHelper(Ty);
      StringRef Name =
          cast<llvm::MDString>(
              ScalarMD->getOperand(CodeGenOpts.NewStructPathTBAA ? 2 : 0))
              ->getString();
      TyName = Name;
    } else {
      // For non-builtin types use the mangled name of the canonical type.
      llvm::raw_svector_ostream TyOut(TyName);
      MangleCtx->mangleCanonicalTypeName(QualType(Ty, 0), TyOut);
    }

    SmallString<256> OutName("p");
    OutName += std::to_string(PtrDepth);
    OutName += " ";
    OutName += TyName;
    return createScalarTypeNode(OutName, AnyPtr, Size);
  }
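
  // For illustration: with pointer TBAA enabled, `int **` has depth 2 and gets
  // a scalar node named "p2 int" whose parent is the "any pointer" node above,
  // so distinct pointer types no longer collapse into a single alias set.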

  // Accesses to arrays are accesses to objects of their element types.
  if (CodeGenOpts.NewStructPathTBAA && Ty->isArrayType())
    return getTypeInfo(cast<ArrayType>(Ty)->getElementType());

  // Enum types are distinct types. In C++ they have "underlying types",
  // however they aren't related for TBAA.
  if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
    if (!Features.CPlusPlus)
      return getTypeInfo(ETy->getDecl()->getIntegerType());

    // In C++ mode, types have linkage, so we can rely on the ODR and
    // on their mangled names, if they're external.
    // TODO: Is there a way to get a program-wide unique name for a
    // decl with local linkage or no linkage?
    if (!ETy->getDecl()->isExternallyVisible())
      return getChar();

    SmallString<256> OutName;
    llvm::raw_svector_ostream Out(OutName);
    CGTypes.getCXXABI().getMangleContext().mangleCanonicalTypeName(
        QualType(ETy, 0), Out);
    return createScalarTypeNode(OutName, getChar(), Size);
  }

  if (const auto *EIT = dyn_cast<BitIntType>(Ty)) {
    SmallString<256> OutName;
    llvm::raw_svector_ostream Out(OutName);
    // Don't specify signed/unsigned since integer types can alias despite sign
    // differences.
    Out << "_BitInt(" << EIT->getNumBits() << ')';
    return createScalarTypeNode(OutName, getChar(), Size);
  }

  // For now, handle any other kind of type conservatively.
  return getChar();
}

llvm::MDNode *CodeGenTBAA::getTypeInfo(QualType QTy) {
  // At -O0 or relaxed aliasing, TBAA is not emitted for regular types.
  if (CodeGenOpts.OptimizationLevel == 0 || CodeGenOpts.RelaxedAliasing)
    return nullptr;

  // If the type has the may_alias attribute (even on a typedef), it is
  // effectively in the general char alias class.
  if (TypeHasMayAlias(QTy))
    return getChar();

  // We need this function to not fall back to returning the "omnipotent char"
  // type node for aggregate and union types. Otherwise, any dereference of an
  // aggregate will result in the may-alias access descriptor, meaning all
  // subsequent accesses to direct and indirect members of that aggregate will
  // be considered may-alias too.
  // TODO: Combine getTypeInfo() and getValidBaseTypeInfo() into a single
  // function.
  if (isValidBaseType(QTy))
    return getValidBaseTypeInfo(QTy);

  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
  if (llvm::MDNode *N = MetadataCache[Ty])
    return N;

  // Note that the following helper call is allowed to add new nodes to the
  // cache, which invalidates all its previously obtained iterators. So we
  // first generate the node for the type and then add that node to the cache.
  llvm::MDNode *TypeNode = getTypeInfoHelper(Ty);
  return MetadataCache[Ty] = TypeNode;
}

TBAAAccessInfo CodeGenTBAA::getAccessInfo(QualType AccessType) {
  // Pointee values may have incomplete types, but they shall never be
  // dereferenced.
  if (AccessType->isIncompleteType())
    return TBAAAccessInfo::getIncompleteInfo();

  if (TypeHasMayAlias(AccessType))
    return TBAAAccessInfo::getMayAliasInfo();

  uint64_t Size = Context.getTypeSizeInChars(AccessType).getQuantity();
  return TBAAAccessInfo(getTypeInfo(AccessType), Size);
}

TBAAAccessInfo CodeGenTBAA::getVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
  const llvm::DataLayout &DL = Module.getDataLayout();
  unsigned Size = DL.getPointerTypeSize(VTablePtrType);
  return TBAAAccessInfo(createScalarTypeNode("vtable pointer", getRoot(), Size),
                        Size);
}

bool
CodeGenTBAA::CollectFields(uint64_t BaseOffset,
                           QualType QTy,
                           SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &
                             Fields,
                           bool MayAlias) {
  /* Things not handled yet include: C++ base classes, bitfields, */

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    if (TTy->isUnionType()) {
      uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
      llvm::MDNode *TBAAType = getChar();
      llvm::MDNode *TBAATag = getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
      Fields.push_back(
          llvm::MDBuilder::TBAAStructField(BaseOffset, Size, TBAATag));
      return true;
    }
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    if (RD->hasFlexibleArrayMember())
      return false;

    // TODO: Handle C++ base classes.
    if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD))
      if (Decl->bases_begin() != Decl->bases_end())
        return false;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    const CGRecordLayout &CGRL = CGTypes.getCGRecordLayout(RD);

    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      if (isEmptyFieldForLayout(Context, *i))
        continue;

      uint64_t Offset =
          BaseOffset + Layout.getFieldOffset(idx) / Context.getCharWidth();

      // Create a single field for consecutive named bitfields using char as
      // base type.
      if ((*i)->isBitField()) {
        const CGBitFieldInfo &Info = CGRL.getBitFieldInfo(*i);
        // For big endian targets the first bitfield in the consecutive run is
        // at the most-significant end; see CGRecordLowering::setBitFieldInfo
        // for more information.
        bool IsBE = Context.getTargetInfo().isBigEndian();
        bool IsFirst = IsBE ? Info.StorageSize - (Info.Offset + Info.Size) == 0
                            : Info.Offset == 0;
        if (!IsFirst)
          continue;
        unsigned CurrentBitFieldSize = Info.StorageSize;
        uint64_t Size =
            llvm::divideCeil(CurrentBitFieldSize, Context.getCharWidth());
        llvm::MDNode *TBAAType = getChar();
        llvm::MDNode *TBAATag =
            getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
        Fields.push_back(
            llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
        continue;
      }
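
      // For illustration: a run of adjacent named bitfields sharing one
      // storage unit is described by a single char-typed entry covering that
      // storage unit, emitted only for the first bitfield in the run.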

      QualType FieldQTy = i->getType();
      if (!CollectFields(Offset, FieldQTy, Fields,
                         MayAlias || TypeHasMayAlias(FieldQTy)))
        return false;
    }
    return true;
  }

  /* Otherwise, treat whatever it is as a field. */
  uint64_t Offset = BaseOffset;
  uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
  llvm::MDNode *TBAAType = MayAlias ? getChar() : getTypeInfo(QTy);
  llvm::MDNode *TBAATag = getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
  Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
  return true;
}

llvm::MDNode *
CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
  if (CodeGenOpts.OptimizationLevel == 0 || CodeGenOpts.RelaxedAliasing)
    return nullptr;

  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();

  if (llvm::MDNode *N = StructMetadataCache[Ty])
    return N;

  SmallVector<llvm::MDBuilder::TBAAStructField, 4> Fields;
  if (CollectFields(0, QTy, Fields, TypeHasMayAlias(QTy)))
    return MDHelper.createTBAAStructNode(Fields);

  // For now, handle any other kind of type conservatively.
  return StructMetadataCache[Ty] = nullptr;
}
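
// For illustration: the node built here is attached as !tbaa.struct metadata
// on aggregate copies (e.g. llvm.memcpy) and lists one
//   i64 <offset>, i64 <size>, !<access tag>
// triple per copied field.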

llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
  if (auto *TTy = dyn_cast<RecordType>(Ty)) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    using TBAAStructField = llvm::MDBuilder::TBAAStructField;
    SmallVector<TBAAStructField, 4> Fields;
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      // Handle C++ base classes. Non-virtual bases can be treated as a kind
      // of field. Virtual bases are more complex and omitted, but avoid an
      // incomplete view for NewStructPathTBAA.
      if (CodeGenOpts.NewStructPathTBAA && CXXRD->getNumVBases() != 0)
        return nullptr;
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        if (B.isVirtual())
          continue;
        QualType BaseQTy = B.getType();
        const CXXRecordDecl *BaseRD = BaseQTy->getAsCXXRecordDecl();
        if (BaseRD->isEmpty())
          continue;
        llvm::MDNode *TypeNode = isValidBaseType(BaseQTy)
                                     ? getValidBaseTypeInfo(BaseQTy)
                                     : getTypeInfo(BaseQTy);
        if (!TypeNode)
          return nullptr;
        uint64_t Offset = Layout.getBaseClassOffset(BaseRD).getQuantity();
        uint64_t Size =
            Context.getASTRecordLayout(BaseRD).getDataSize().getQuantity();
        Fields.push_back(
            llvm::MDBuilder::TBAAStructField(Offset, Size, TypeNode));
      }
      // The order in which base class subobjects are allocated is unspecified,
      // so may differ from declaration order. In particular, Itanium ABI will
      // allocate a primary base first.
      // Since we exclude empty subobjects, the objects are not overlapping and
      // their offsets are unique.
      llvm::sort(Fields,
                 [](const TBAAStructField &A, const TBAAStructField &B) {
                   return A.Offset < B.Offset;
                 });
    }
    for (FieldDecl *Field : RD->fields()) {
      if (Field->isZeroSize(Context) || Field->isUnnamedBitField())
        continue;
      QualType FieldQTy = Field->getType();
      llvm::MDNode *TypeNode = isValidBaseType(FieldQTy)
                                   ? getValidBaseTypeInfo(FieldQTy)
                                   : getTypeInfo(FieldQTy);
      if (!TypeNode)
        return nullptr;

      uint64_t BitOffset = Layout.getFieldOffset(Field->getFieldIndex());
      uint64_t Offset = Context.toCharUnitsFromBits(BitOffset).getQuantity();
      uint64_t Size = Context.getTypeSizeInChars(FieldQTy).getQuantity();
      Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size,
                                                        TypeNode));
    }

    SmallString<256> OutName;
    if (Features.CPlusPlus) {
      // Don't use the mangler for C code.
      llvm::raw_svector_ostream Out(OutName);
      CGTypes.getCXXABI().getMangleContext().mangleCanonicalTypeName(
          QualType(Ty, 0), Out);
    } else {
      OutName = RD->getName();
    }

    if (CodeGenOpts.NewStructPathTBAA) {
      llvm::MDNode *Parent = getChar();
      uint64_t Size = Context.getTypeSizeInChars(Ty).getQuantity();
      llvm::Metadata *Id = MDHelper.createString(OutName);
      return MDHelper.createTBAATypeNode(Parent, Size, Id, Fields);
    }

    // Create the struct type node with a vector of pairs (offset, type).
    SmallVector<std::pair<llvm::MDNode *, uint64_t>, 4> OffsetsAndTypes;
    for (const auto &Field : Fields)
      OffsetsAndTypes.push_back(std::make_pair(Field.Type, Field.Offset));
    return MDHelper.createTBAAStructTypeNode(OutName, OffsetsAndTypes);
  }

  return nullptr;
}
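
// For illustration: with the classic struct-path format the node built above
// pairs each member's type node with its byte offset; for
//   struct S { int i; float f; };
// in C++ mode this is roughly
//   !{!"_ZTS1S", !<int node>, i64 0, !<float node>, i64 4}
// on a target with 4-byte int.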

llvm::MDNode *CodeGenTBAA::getValidBaseTypeInfo(QualType QTy) {
  assert(isValidBaseType(QTy) && "Must be a valid base type");

  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();

  // nullptr is a valid value in the cache, so use find rather than []
  auto I = BaseTypeMetadataCache.find(Ty);
  if (I != BaseTypeMetadataCache.end())
    return I->second;

  // First calculate the metadata, before recomputing the insertion point, as
  // the helper can recursively call us.
  llvm::MDNode *TypeNode = getBaseTypeInfoHelper(Ty);
  LLVM_ATTRIBUTE_UNUSED auto inserted =
      BaseTypeMetadataCache.insert({Ty, TypeNode});
  assert(inserted.second && "BaseType metadata was already inserted");

  return TypeNode;
}

llvm::MDNode *CodeGenTBAA::getBaseTypeInfo(QualType QTy) {
  return isValidBaseType(QTy) ? getValidBaseTypeInfo(QTy) : nullptr;
}

llvm::MDNode *CodeGenTBAA::getAccessTagInfo(TBAAAccessInfo Info) {
  assert(!Info.isIncomplete() && "Access to an object of an incomplete type!");

  if (Info.isMayAlias())
    Info = TBAAAccessInfo(getChar(), Info.Size);

  if (!Info.AccessType)
    return nullptr;

  if (!CodeGenOpts.StructPathTBAA)
    Info = TBAAAccessInfo(Info.AccessType, Info.Size);

  llvm::MDNode *&N = AccessTagMetadataCache[Info];
  if (N)
    return N;

  if (!Info.BaseType) {
    Info.BaseType = Info.AccessType;
    assert(!Info.Offset && "Nonzero offset for an access with no base type!");
  }
  if (CodeGenOpts.NewStructPathTBAA) {
    return N = MDHelper.createTBAAAccessTag(Info.BaseType, Info.AccessType,
                                            Info.Offset, Info.Size);
  }
  return N = MDHelper.createTBAAStructTagNode(Info.BaseType, Info.AccessType,
                                              Info.Offset);
}
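
// For illustration: a classic struct-path access tag has the shape
//   !{!<base type>, !<access type>, i64 <offset>}
// e.g. an access to the int member at offset 0 of a struct uses that struct's
// type node as the base and the "int" node as the access type.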

TBAAAccessInfo CodeGenTBAA::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
                                                 TBAAAccessInfo TargetInfo) {
  if (SourceInfo.isMayAlias() || TargetInfo.isMayAlias())
    return TBAAAccessInfo::getMayAliasInfo();
  return TargetInfo;
}

TBAAAccessInfo
CodeGenTBAA::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
                                                 TBAAAccessInfo InfoB) {
  if (InfoA == InfoB)
    return InfoA;

  if (!InfoA || !InfoB)
    return TBAAAccessInfo();

  if (InfoA.isMayAlias() || InfoB.isMayAlias())
    return TBAAAccessInfo::getMayAliasInfo();

  // TODO: Implement the rest of the logic here. For example, two accesses
  // with the same final access types result in an access to an object of that
  // final access type regardless of their base types.
  return TBAAAccessInfo::getMayAliasInfo();
}

TBAAAccessInfo
CodeGenTBAA::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
                                            TBAAAccessInfo SrcInfo) {
  if (DestInfo == SrcInfo)
    return DestInfo;

  if (!DestInfo || !SrcInfo)
    return TBAAAccessInfo();

  if (DestInfo.isMayAlias() || SrcInfo.isMayAlias())
    return TBAAAccessInfo::getMayAliasInfo();

  // TODO: Implement the rest of the logic here. For example, two accesses
  // with the same final access types result in an access to an object of that
  // final access type regardless of their base types.
  return TBAAAccessInfo::getMayAliasInfo();
}