//===- ASTContext.cpp - Context to hold long-lived AST nodes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//
13 #include "clang/AST/ASTContext.h"
15 #include "Interp/Context.h"
16 #include "clang/AST/APValue.h"
17 #include "clang/AST/ASTConcept.h"
18 #include "clang/AST/ASTMutationListener.h"
19 #include "clang/AST/ASTTypeTraits.h"
20 #include "clang/AST/Attr.h"
21 #include "clang/AST/AttrIterator.h"
22 #include "clang/AST/CharUnits.h"
23 #include "clang/AST/Comment.h"
24 #include "clang/AST/Decl.h"
25 #include "clang/AST/DeclBase.h"
26 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/DeclContextInternals.h"
28 #include "clang/AST/DeclObjC.h"
29 #include "clang/AST/DeclOpenMP.h"
30 #include "clang/AST/DeclTemplate.h"
31 #include "clang/AST/DeclarationName.h"
32 #include "clang/AST/DependenceFlags.h"
33 #include "clang/AST/Expr.h"
34 #include "clang/AST/ExprCXX.h"
35 #include "clang/AST/ExprConcepts.h"
36 #include "clang/AST/ExternalASTSource.h"
37 #include "clang/AST/Mangle.h"
38 #include "clang/AST/MangleNumberingContext.h"
39 #include "clang/AST/NestedNameSpecifier.h"
40 #include "clang/AST/ParentMapContext.h"
41 #include "clang/AST/RawCommentList.h"
42 #include "clang/AST/RecordLayout.h"
43 #include "clang/AST/Stmt.h"
44 #include "clang/AST/TemplateBase.h"
45 #include "clang/AST/TemplateName.h"
46 #include "clang/AST/Type.h"
47 #include "clang/AST/TypeLoc.h"
48 #include "clang/AST/UnresolvedSet.h"
49 #include "clang/AST/VTableBuilder.h"
50 #include "clang/Basic/AddressSpaces.h"
51 #include "clang/Basic/Builtins.h"
52 #include "clang/Basic/CommentOptions.h"
53 #include "clang/Basic/ExceptionSpecificationType.h"
54 #include "clang/Basic/IdentifierTable.h"
55 #include "clang/Basic/LLVM.h"
56 #include "clang/Basic/LangOptions.h"
57 #include "clang/Basic/Linkage.h"
58 #include "clang/Basic/Module.h"
59 #include "clang/Basic/NoSanitizeList.h"
60 #include "clang/Basic/ObjCRuntime.h"
61 #include "clang/Basic/ProfileList.h"
62 #include "clang/Basic/SourceLocation.h"
63 #include "clang/Basic/SourceManager.h"
64 #include "clang/Basic/Specifiers.h"
65 #include "clang/Basic/TargetCXXABI.h"
66 #include "clang/Basic/TargetInfo.h"
67 #include "clang/Basic/XRayLists.h"
68 #include "llvm/ADT/APFixedPoint.h"
69 #include "llvm/ADT/APInt.h"
70 #include "llvm/ADT/APSInt.h"
71 #include "llvm/ADT/ArrayRef.h"
72 #include "llvm/ADT/DenseMap.h"
73 #include "llvm/ADT/DenseSet.h"
74 #include "llvm/ADT/FoldingSet.h"
75 #include "llvm/ADT/PointerUnion.h"
76 #include "llvm/ADT/STLExtras.h"
77 #include "llvm/ADT/SmallPtrSet.h"
78 #include "llvm/ADT/SmallVector.h"
79 #include "llvm/ADT/StringExtras.h"
80 #include "llvm/ADT/StringRef.h"
81 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
82 #include "llvm/Support/Capacity.h"
83 #include "llvm/Support/Casting.h"
84 #include "llvm/Support/Compiler.h"
85 #include "llvm/Support/ErrorHandling.h"
86 #include "llvm/Support/MD5.h"
87 #include "llvm/Support/MathExtras.h"
88 #include "llvm/Support/raw_ostream.h"
89 #include "llvm/TargetParser/Triple.h"
102 using namespace clang
;
/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }

  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return first BaseLocation to first look at the
    // expansion site, the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site (this
    // can happen if the name's spelling is provided via macro argument), and
    // always to the declaration itself.
    Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
  }

  return Locations;
}
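
// Search a single file's raw comments for documentation attached to \p D:
// either a trailing comment on the same line as the declaration, or the
// closest preceding non-trailing comment with nothing but whitespace between
// it and the declaration.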
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}
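
// Search every candidate declaration location for an attached raw comment,
// loading comments from the external AST source on first use.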
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}
void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template.
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization.
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}
const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D,
    const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      *OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalid.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    return CommentlessRedeclChains.lookup(CanonicalD);
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}
void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}
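
// Collect redeclarations of \p ObjCMethod that are introduced by known class
// extensions of the method's interface.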
static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}
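
// Try to attach any still-unattached comments in the current file to the
// declarations that were just parsed.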
void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    if (DeclRawComments.count(D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, DeclLoc, *CommentsInThisFile)) {
        cacheRawCommentForDecl(*D, *DocComment);
        comments::FullComment *FC = DocComment->parse(*this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}
comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}
comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}
comments::FullComment *ASTContext::getCommentForDecl(
                                              const Decl *D,
                                              const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          TTP->isExpandedParameterPack()
              ? std::optional<unsigned>(TTP->getNumExpansionParameters())
              : std::nullopt);
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);
    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
                                           cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
      TTP->getPosition(), TTP->isParameterPack(), nullptr,
      TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
                                    CanonParams, SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}
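
// Prefer a C++ ABI explicitly selected in the language options over the
// target's default ABI kind.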
TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.value_or(Kind);
}
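
// Instantiate the C++ ABI object used for layout and mangling decisions;
// returns null when not compiling C++.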
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}
interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}
ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}
static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}
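
// Release memory that is not owned by the bump allocator: DeclContext lookup
// tables, record layouts, attribute vectors, and per-module initializers.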
void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl *,
                      const ASTRecordLayout *>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl *, const ASTRecordLayout *>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl *, AttrVec *>::iterator A = DeclAttrs.begin(),
                                                      AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types, " << sizeof(Name##Type) << " each "        \
                 << "(" << counts[Idx] * sizeof(Name##Type)             \
                 << " bytes)\n";                                        \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}
void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module *> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return std::nullopt;
  return MergedIt->second;
}
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}
ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return std::nullopt;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isModulePurview());
  assert(!CurrentCXXNamedModule &&
         "We should set named module for ASTContext for only once");
  CurrentCXXNamedModule = M;
}
ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}
RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}
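
// Create the singleton builtin types for the given target (and auxiliary
// target, if any). Called once target information is available.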
void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  InitBuiltinType(VoidTy, BuiltinType::Void);

  InitBuiltinType(BoolTy, BuiltinType::Bool);
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}
&ASTContext::getDiagnostics() const {
1409 return SourceMgr
.getDiagnostics();
1412 AttrVec
& ASTContext::getDeclAttrs(const Decl
*D
) {
1413 AttrVec
*&Result
= DeclAttrs
[D
];
1415 void *Mem
= Allocate(sizeof(AttrVec
));
1416 Result
= new (Mem
) AttrVec
;
1422 /// Erase the attributes corresponding to the given declaration.
1423 void ASTContext::eraseDeclAttrs(const Decl
*D
) {
1424 llvm::DenseMap
<const Decl
*, AttrVec
*>::iterator Pos
= DeclAttrs
.find(D
);
1425 if (Pos
!= DeclAttrs
.end()) {
1426 Pos
->second
->~AttrVec();
1427 DeclAttrs
.erase(Pos
);
1432 MemberSpecializationInfo
*
1433 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl
*Var
) {
1434 assert(Var
->isStaticDataMember() && "Not a static data member");
1435 return getTemplateOrSpecializationInfo(Var
)
1436 .dyn_cast
<MemberSpecializationInfo
*>();
1439 ASTContext::TemplateOrSpecializationInfo
1440 ASTContext::getTemplateOrSpecializationInfo(const VarDecl
*Var
) {
1441 llvm::DenseMap
<const VarDecl
*, TemplateOrSpecializationInfo
>::iterator Pos
=
1442 TemplateOrInstantiation
.find(Var
);
1443 if (Pos
== TemplateOrInstantiation
.end())
1450 ASTContext::setInstantiatedFromStaticDataMember(VarDecl
*Inst
, VarDecl
*Tmpl
,
1451 TemplateSpecializationKind TSK
,
1452 SourceLocation PointOfInstantiation
) {
1453 assert(Inst
->isStaticDataMember() && "Not a static data member");
1454 assert(Tmpl
->isStaticDataMember() && "Not a static data member");
1455 setTemplateOrSpecializationInfo(Inst
, new (*this) MemberSpecializationInfo(
1456 Tmpl
, TSK
, PointOfInstantiation
));
1460 ASTContext::setTemplateOrSpecializationInfo(VarDecl
*Inst
,
1461 TemplateOrSpecializationInfo TSI
) {
1462 assert(!TemplateOrInstantiation
[Inst
] &&
1463 "Already noted what the variable was instantiated from");
1464 TemplateOrInstantiation
[Inst
] = TSI
;
1468 ASTContext::getInstantiatedFromUsingDecl(NamedDecl
*UUD
) {
1469 return InstantiatedFromUsingDecl
.lookup(UUD
);
1473 ASTContext::setInstantiatedFromUsingDecl(NamedDecl
*Inst
, NamedDecl
*Pattern
) {
1474 assert((isa
<UsingDecl
>(Pattern
) ||
1475 isa
<UnresolvedUsingValueDecl
>(Pattern
) ||
1476 isa
<UnresolvedUsingTypenameDecl
>(Pattern
)) &&
1477 "pattern decl is not a using decl");
1478 assert((isa
<UsingDecl
>(Inst
) ||
1479 isa
<UnresolvedUsingValueDecl
>(Inst
) ||
1480 isa
<UnresolvedUsingTypenameDecl
>(Inst
)) &&
1481 "instantiation did not produce a using decl");
1482 assert(!InstantiatedFromUsingDecl
[Inst
] && "pattern already exists");
1483 InstantiatedFromUsingDecl
[Inst
] = Pattern
;
1487 ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl
*UUD
) {
1488 return InstantiatedFromUsingEnumDecl
.lookup(UUD
);
1491 void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl
*Inst
,
1492 UsingEnumDecl
*Pattern
) {
1493 assert(!InstantiatedFromUsingEnumDecl
[Inst
] && "pattern already exists");
1494 InstantiatedFromUsingEnumDecl
[Inst
] = Pattern
;
1498 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl
*Inst
) {
1499 return InstantiatedFromUsingShadowDecl
.lookup(Inst
);
1503 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl
*Inst
,
1504 UsingShadowDecl
*Pattern
) {
1505 assert(!InstantiatedFromUsingShadowDecl
[Inst
] && "pattern already exists");
1506 InstantiatedFromUsingShadowDecl
[Inst
] = Pattern
;
1509 FieldDecl
*ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl
*Field
) {
1510 return InstantiatedFromUnnamedFieldDecl
.lookup(Field
);
1513 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl
*Inst
,
1515 assert(!Inst
->getDeclName() && "Instantiated field decl is not unnamed");
1516 assert(!Tmpl
->getDeclName() && "Template field decl is not unnamed");
1517 assert(!InstantiatedFromUnnamedFieldDecl
[Inst
] &&
1518 "Already noted what unnamed field was instantiated from");
1520 InstantiatedFromUnnamedFieldDecl
[Inst
] = Tmpl
;
1523 ASTContext::overridden_cxx_method_iterator
1524 ASTContext::overridden_methods_begin(const CXXMethodDecl
*Method
) const {
1525 return overridden_methods(Method
).begin();
1528 ASTContext::overridden_cxx_method_iterator
1529 ASTContext::overridden_methods_end(const CXXMethodDecl
*Method
) const {
1530 return overridden_methods(Method
).end();
1534 ASTContext::overridden_methods_size(const CXXMethodDecl
*Method
) const {
1535 auto Range
= overridden_methods(Method
);
1536 return Range
.end() - Range
.begin();
1539 ASTContext::overridden_method_range
1540 ASTContext::overridden_methods(const CXXMethodDecl
*Method
) const {
1541 llvm::DenseMap
<const CXXMethodDecl
*, CXXMethodVector
>::const_iterator Pos
=
1542 OverriddenMethods
.find(Method
->getCanonicalDecl());
1543 if (Pos
== OverriddenMethods
.end())
1544 return overridden_method_range(nullptr, nullptr);
1545 return overridden_method_range(Pos
->second
.begin(), Pos
->second
.end());
1548 void ASTContext::addOverriddenMethod(const CXXMethodDecl
*Method
,
1549 const CXXMethodDecl
*Overridden
) {
1550 assert(Method
->isCanonicalDecl() && Overridden
->isCanonicalDecl());
1551 OverriddenMethods
[Method
].push_back(Overridden
);
1554 void ASTContext::getOverriddenMethods(
1556 SmallVectorImpl
<const NamedDecl
*> &Overridden
) const {
1559 if (const auto *CXXMethod
= dyn_cast
<CXXMethodDecl
>(D
)) {
1560 Overridden
.append(overridden_methods_begin(CXXMethod
),
1561 overridden_methods_end(CXXMethod
));
1565 const auto *Method
= dyn_cast
<ObjCMethodDecl
>(D
);
1569 SmallVector
<const ObjCMethodDecl
*, 8> OverDecls
;
1570 Method
->getOverriddenMethods(OverDecls
);
1571 Overridden
.append(OverDecls
.begin(), OverDecls
.end());
1574 void ASTContext::addedLocalImportDecl(ImportDecl
*Import
) {
1575 assert(!Import
->getNextLocalImport() &&
1576 "Import declaration already in the chain");
1577 assert(!Import
->isFromASTFile() && "Non-local import declaration");
1578 if (!FirstLocalImport
) {
1579 FirstLocalImport
= Import
;
1580 LastLocalImport
= Import
;
1584 LastLocalImport
->setNextLocalImport(Import
);
1585 LastLocalImport
= Import
;
//===----------------------------------------------------------------------===//
//                         Type Sizing and Analysis
//===----------------------------------------------------------------------===//

/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  switch (T->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a floating point type!");
  case BuiltinType::BFloat16:
    return Target->getBFloat16Format();
  case BuiltinType::Float16:
    return Target->getHalfFormat();
  case BuiltinType::Half:
    // For HLSL, when the native half type is disabled, half will be treat as
    // float.
    if (getLangOpts().HLSL)
      if (getLangOpts().NativeHalfType)
        return Target->getHalfFormat();
      else
        return Target->getFloatFormat();
    else
      return Target->getHalfFormat();
  case BuiltinType::Float:      return Target->getFloatFormat();
  case BuiltinType::Double:     return Target->getDoubleFormat();
  case BuiltinType::Ibm128:
    return Target->getIbm128Format();
  case BuiltinType::LongDouble:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getLongDoubleFormat();
    return Target->getLongDoubleFormat();
  case BuiltinType::Float128:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getFloat128Format();
    return Target->getFloat128Format();
  }
}
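
// Compute the alignment of a declaration, taking aligned/packed attributes,
// the type's preferred alignment, field layout, and target limits into
// account.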

CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
      if (const auto *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage() && !ForAlignof) {
          uint64_t TypeSize = getTypeSize(T.getTypePtr());
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
        }
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());

        // Alignment is always a power of 2, so the GCD will be a power of 2,
        // which means we get to do this crazy thing instead of Euclid's.
        uint64_t LowBitOfOffset = Offset & (~Offset + 1);
        if (LowBitOfOffset < FieldAlign)
          FieldAlign = static_cast<unsigned>(LowBitOfOffset);

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}
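
// Worked example of the power-of-two GCD trick above (illustrative only): if
// the record is aligned to 64 bits and a field sits at bit offset 40, then
// Offset & (~Offset + 1) == 8 (the lowest set bit of 40), so within an
// arbitrary instance of the record the field is only guaranteed 8-bit
// alignment, and that is what constrains the result.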

CharUnits ASTContext::getExnObjectAlignment() const {
  return toCharUnitsFromBits(Target->getExnObjectAlignment());
}

// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
  TypeInfoChars Info = getTypeInfoInChars(T);

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject. We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
  if (getLangOpts().CPlusPlus) {
    if (const auto *RT = T->getAs<RecordType>()) {
      const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
      Info.Width = layout.getDataSize();
    }
  }

  return Info;
}
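
// Illustrative example (assuming C++ mode and a typical 64-bit target with a
// 4-byte, 4-byte-aligned int): for 'struct S { int i; char c; };',
// getTypeSizeInChars(S) is 8 because of tail padding, while
// getTypeInfoDataSizeInChars(S).Width is 5, the number of bytes a trivial
// copy assignment actually has to move.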

/// getConstantArrayInfoInChars - Performing the computation in CharUnits
/// instead of in bits prevents overflowing the uint64_t for some large arrays.
static TypeInfoChars
getConstantArrayInfoInChars(const ASTContext &Context,
                            const ConstantArrayType *CAT) {
  TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
  uint64_t Size = CAT->getSize().getZExtValue();
  assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
              (uint64_t)(-1)/Size) &&
         "Overflow in array type char size evaluation");
  uint64_t Width = EltInfo.Width.getQuantity() * Size;
  unsigned Align = EltInfo.Align.getQuantity();
  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
      Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64)
    Width = llvm::alignTo(Width, Align);
  return TypeInfoChars(CharUnits::fromQuantity(Width),
                       CharUnits::fromQuantity(Align),
                       EltInfo.AlignRequirement);
}

TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
    return getConstantArrayInfoInChars(*this, CAT);
  TypeInfo Info = getTypeInfo(T);
  return TypeInfoChars(toCharUnitsFromBits(Info.Width),
                       toCharUnitsFromBits(Info.Align), Info.AlignRequirement);
}

TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
  return getTypeInfoInChars(T.getTypePtr());
}

bool ASTContext::isPromotableIntegerType(QualType T) const {
  // HLSL doesn't promote all small integer types to int, it
  // just uses the rank-based promotion rules for all types.
  if (getLangOpts().HLSL)
    return false;

  if (const auto *BT = T->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return true;
    default:
      return false;
    }

  // Enumerated types are promotable to their compatible integer types
  // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
  if (const auto *ET = T->getAs<EnumType>()) {
    if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
        ET->getDecl()->isScoped())
      return false;

    return true;
  }

  return false;
}
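
// Example (C99 6.3.1.1p2 / C++ [conv.prom]): 'short' and 'unsigned char' are
// promotable because every value fits in 'int', whereas a scoped enumeration
// ('enum class E : int') is rejected above via isScoped(), so it never
// undergoes integral promotion.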

bool ASTContext::isAlignmentRequired(const Type *T) const {
  return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
}

bool ASTContext::isAlignmentRequired(QualType T) const {
  return isAlignmentRequired(T.getTypePtr());
}

unsigned ASTContext::getTypeAlignIfKnown(QualType T,
                                         bool NeedsPreferredAlignment) const {
  // An alignment on a typedef overrides anything else.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // If we have an (array of) complete type, we're done.
  T = getBaseElementType(T);
  if (!T->isIncompleteType())
    return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);

  // If we had an array type, its element type might be a typedef
  // type with an alignment attribute.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // Otherwise, see if the declaration of the type had an attribute.
  if (const auto *TT = T->getAs<TagType>())
    return TT->getDecl()->getMaxAlignment();

  return 0;
}

TypeInfo ASTContext::getTypeInfo(const Type *T) const {
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  TypeInfo TI = getTypeInfoImpl(T);
  MemoizedTypeInfo[T] = TI;
  return TI;
}

/// getTypeInfoImpl - Return the size of the specified type, in bits. This
/// method does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
  uint64_t Width = 0;
  unsigned Align = 8;
  AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
  LangAS AS = LangAS::Default;
  switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)                       \
  case Type::Class:                                                            \
    assert(!T->isDependentType() && "should not see dependent types here");    \
    return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Should not see dependent types");

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::ConstantArray: {
    // Model non-constant sized arrays as size zero, but track the alignment.
    uint64_t Size = 0;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
      Size = CAT->getSize().getZExtValue();

    TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
    assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
           "Overflow in array type bit size evaluation");
    Width = EltInfo.Width * Size;
    Align = EltInfo.Align;
    AlignRequirement = EltInfo.AlignRequirement;
    if (!getTargetInfo().getCXXABI().isMicrosoft() ||
        getTargetInfo().getPointerWidth(LangAS::Default) == 64)
      Width = llvm::alignTo(Width, Align);
    break;
  }

  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(T);
    TypeInfo EltInfo = getTypeInfo(VT->getElementType());
    Width = VT->isExtVectorBoolType() ? VT->getNumElements()
                                      : EltInfo.Width * VT->getNumElements();
    // Enforce at least byte size and alignment.
    Width = std::max<unsigned>(8, Width);
    Align = std::max<unsigned>(8, Width);

    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
    if (Align & (Align-1)) {
      Align = llvm::bit_ceil(Align);
      Width = llvm::alignTo(Width, Align);
    }
    // Adjust the alignment based on the target max.
    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
    if (TargetVectorAlign && TargetVectorAlign < Align)
      Align = TargetVectorAlign;
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
      // Adjust the alignment for fixed-length SVE vectors. This is important
      // for non-power-of-2 vector lengths.
      Align = 128;
    else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      // Adjust the alignment for fixed-length SVE predicates.
      Align = 16;
    else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData)
      // Adjust the alignment for fixed-length RVV vectors.
      Align = std::min<unsigned>(64, Width);
    break;
  }

  case Type::ConstantMatrix: {
    const auto *MT = cast<ConstantMatrixType>(T);
    TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
    // The internal layout of a matrix value is implementation defined.
    // Initially be ABI compatible with arrays with respect to alignment and
    // size.
    Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
    Align = ElementInfo.Align;
    break;
  }

  case Type::Builtin:
    switch (cast<BuiltinType>(T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;
    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Char8:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = Target->getInt128Align();
      break;
    case BuiltinType::ShortAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatUShortAccum:
      Width = Target->getShortAccumWidth();
      Align = Target->getShortAccumAlign();
      break;
    case BuiltinType::Accum:
    case BuiltinType::UAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatUAccum:
      Width = Target->getAccumWidth();
      Align = Target->getAccumAlign();
      break;
    case BuiltinType::LongAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatULongAccum:
      Width = Target->getLongAccumWidth();
      Align = Target->getLongAccumAlign();
      break;
    case BuiltinType::ShortFract:
    case BuiltinType::UShortFract:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatUShortFract:
      Width = Target->getShortFractWidth();
      Align = Target->getShortFractAlign();
      break;
    case BuiltinType::Fract:
    case BuiltinType::UFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatUFract:
      Width = Target->getFractWidth();
      Align = Target->getFractAlign();
      break;
    case BuiltinType::LongFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatULongFract:
      Width = Target->getLongFractWidth();
      Align = Target->getLongFractAlign();
      break;
    case BuiltinType::BFloat16:
      if (Target->hasBFloat16Type()) {
        Width = Target->getBFloat16Width();
        Align = Target->getBFloat16Align();
      } else if ((getLangOpts().SYCLIsDevice ||
                  (getLangOpts().OpenMP &&
                   getLangOpts().OpenMPIsTargetDevice)) &&
                 AuxTarget->hasBFloat16Type()) {
        Width = AuxTarget->getBFloat16Width();
        Align = AuxTarget->getBFloat16Align();
      }
      break;
    case BuiltinType::Float16:
    case BuiltinType::Half:
      if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getHalfWidth();
        Align = Target->getHalfAlign();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getHalfWidth();
        Align = AuxTarget->getHalfAlign();
      }
      break;
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    case BuiltinType::Ibm128:
      Width = Target->getIbm128Width();
      Align = Target->getIbm128Align();
      break;
    case BuiltinType::LongDouble:
      if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
          (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
           Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
        Width = AuxTarget->getLongDoubleWidth();
        Align = AuxTarget->getLongDoubleAlign();
      } else {
        Width = Target->getLongDoubleWidth();
        Align = Target->getLongDoubleAlign();
      }
      break;
    case BuiltinType::Float128:
      if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
          !getLangOpts().OpenMPIsTargetDevice) {
        Width = Target->getFloat128Width();
        Align = Target->getFloat128Align();
      } else {
        assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
               "Expected OpenMP device compilation.");
        Width = AuxTarget->getFloat128Width();
        Align = AuxTarget->getFloat128Align();
      }
      break;
    case BuiltinType::NullPtr:
      // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
      Width = Target->getPointerWidth(LangAS::Default);
      Align = Target->getPointerAlign(LangAS::Default);
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      Width = Target->getPointerWidth(LangAS::Default);
      Align = Target->getPointerAlign(LangAS::Default);
      break;
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
      AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
      Width = Target->getPointerWidth(AS);
      Align = Target->getPointerAlign(AS);
      break;
    // The SVE types are effectively target-specific. The length of an
    // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
    // of 128 bits. There is one predicate bit for each vector byte, so the
    // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
    //
    // Because the length is only known at runtime, we use a dummy value
    // of 0 for the static length. The alignment values are those defined
    // by the Procedure Call Standard for the Arm Architecture.
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,    \
                        IsSigned, IsFP, IsBF)                                  \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 128;                                                               \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)         \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 16;                                                                \
    break;
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 16;                                                                \
    break;
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size)                                        \
  case BuiltinType::Id:                                                        \
    Width = Size;                                                              \
    Align = Size;                                                              \
    break;
#include "clang/Basic/PPCTypes.def"
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = ElBits;                                                            \
    break;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind)                      \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 8;                                                                 \
    break;
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId)                                       \
  case BuiltinType::Id:                                                        \
    Width = 0;                                                                 \
    Align = 8;                                                                 \
    break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
    }
    break;
  case Type::ObjCObjectPointer:
    Width = Target->getPointerWidth(LangAS::Default);
    Align = Target->getPointerAlign(LangAS::Default);
    break;
  case Type::BlockPointer:
    AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  case Type::LValueReference:
  case Type::RValueReference:
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
    AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  case Type::Pointer:
    AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  case Type::MemberPointer: {
    const auto *MPT = cast<MemberPointerType>(T);
    CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
    Width = MPI.Width;
    Align = MPI.Align;
    break;
  }
  case Type::Complex: {
    // Complex types have the same alignment as their elements, but twice the
    // size.
    TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
    Width = EltInfo.Width * 2;
    Align = EltInfo.Align;
    break;
  }
  case Type::ObjCObject:
    return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
  case Type::Adjusted:
  case Type::Decayed:
    return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
  case Type::ObjCInterface: {
    const auto *ObjCI = cast<ObjCInterfaceType>(T);
    if (ObjCI->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    break;
  }
  case Type::BitInt: {
    const auto *EIT = cast<BitIntType>(T);
    Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()),
                                 getCharWidth(), Target->getLongLongAlign());
    Width = llvm::alignTo(EIT->getNumBits(), Align);
    break;
  }
  case Type::Record:
  case Type::Enum: {
    const auto *TT = cast<TagType>(T);

    if (TT->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }

    if (const auto *ET = dyn_cast<EnumType>(TT)) {
      const EnumDecl *ED = ET->getDecl();
      TypeInfo Info =
          getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
      if (unsigned AttrAlign = ED->getMaxAlignment()) {
        Info.Align = AttrAlign;
        Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
      }
      return Info;
    }

    const auto *RT = cast<RecordType>(TT);
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    AlignRequirement = RD->hasAttr<AlignedAttr>()
                           ? AlignRequirementKind::RequiredByRecord
                           : AlignRequirementKind::None;
    break;
  }

  case Type::SubstTemplateTypeParm:
    return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
                       getReplacementType().getTypePtr());

  case Type::Auto:
  case Type::DeducedTemplateSpecialization: {
    const auto *A = cast<DeducedType>(T);
    assert(!A->getDeducedType().isNull() &&
           "cannot request the size of an undeduced or dependent auto type");
    return getTypeInfo(A->getDeducedType().getTypePtr());
  }

  case Type::Paren:
    return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());

  case Type::MacroQualified:
    return getTypeInfo(
        cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());

  case Type::ObjCTypeParam:
    return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());

  case Type::Using:
    return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());

  case Type::Typedef: {
    const auto *TT = cast<TypedefType>(T);
    TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
    // If the typedef has an aligned attribute on it, it overrides any computed
    // alignment we have. This violates the GCC documentation (which says that
    // attribute(aligned) can only round up) but matches its implementation.
    if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Elaborated:
    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());

  case Type::Attributed:
    return getTypeInfo(
        cast<AttributedType>(T)->getEquivalentType().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      Width = llvm::bit_ceil(Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::Pipe:
    Width = Target->getPointerWidth(LangAS::opencl_global);
    Align = Target->getPointerAlign(LangAS::opencl_global);
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}
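
// Worked example for the BitInt case above (assuming an 8-bit char and a
// 64-bit 'long long' alignment): _BitInt(12) gets
// Align = clamp(PowerOf2Ceil(12), 8, 64) = 16 and Width = alignTo(12, 16) = 16,
// while _BitInt(100) gets Align = clamp(128, 8, 64) = 64 and
// Width = alignTo(100, 64) = 128.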

unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else {
    UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
  }

  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}

unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
      getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
  return SimdAlign;
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
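
// Quick sanity check of the two conversions (assuming getCharWidth() == 8):
// toCharUnitsFromBits(32) == CharUnits::fromQuantity(4), and
// toBits(CharUnits::fromQuantity(4)) == 32.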

/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return getTypeInfoInChars(T).Width;
}
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).Width;
}

/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}

/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
/// type, in characters, before alignment adjustments. This method does
/// not work on incomplete types.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}
CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}

/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where overaligning a data type is beneficial for
/// performance or for backwards compatibility. (Note: despite the name,
/// the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(getASTRecordLayout(RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double)
  // and long long should be naturally aligned (despite requiring less
  // alignment) if possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on
    // a typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(ABIAlign, (unsigned)getTypeSize(T));

  return ABIAlign;
}
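
// Illustrative, target-specific example: on a typical 32-bit x86 System V
// target, 'double' has a 4-byte ABI alignment but an 8-byte preferred
// alignment, so getTypeAlign may report 32 bits while getPreferredTypeAlign
// reports 64; other targets may behave differently.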

/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
/// for __attribute__((aligned)) on this target, to be used if no alignment
/// value is specified.
unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
  return getTargetInfo().getDefaultAlignForAttributeAligned();
}

/// getAlignOfGlobalVar - Return the alignment in bits that should be given
/// to a global variable of the specified type.
unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
  uint64_t TypeSize = getTypeSize(T.getTypePtr());
  return std::max(getPreferredTypeAlign(T),
                  getTargetInfo().getMinGlobalAlign(TypeSize));
}

/// getAlignOfGlobalVarInChars - Return the alignment in characters that
/// should be given to a global variable of the specified type.
CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
}

CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
  CharUnits Offset = CharUnits::Zero();
  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
    Offset += Layout->getBaseClassOffset(Base);
    Layout = &getASTRecordLayout(Base);
  }
  return Offset;
}

CharUnits
ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    if (DerivedMember)
      std::swap(Base, Derived);
    ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;
  return ThisAdjustment;
}
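
// Illustrative example: given 'struct A { int x; }; struct B { int y; };
// struct C : A, B {};', converting a pointer to member of B into a pointer to
// member of C has to add B's base-class offset within C (typically 4 bytes in
// this layout), which is exactly the accumulated getBaseClassOffset() value
// computed above; the sign is flipped when the conversion goes toward the
// derived-member direction instead.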

/// DeepCollectObjCIvars -
/// This routine first collects all declared, but not synthesized, ivars in
/// super class and then collects all ivars, including those synthesized for
/// current class. This routine is used for implementation of current class
/// when all ivars, declared and synthesized are known.
void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
                                      bool leafClass,
                            SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
    DeepCollectObjCIvars(SuperClass, false, Ivars);
  if (!leafClass) {
    llvm::append_range(Ivars, OI->ivars());
  } else {
    auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
    for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
         Iv = Iv->getNextIvar())
      Ivars.push_back(Iv);
  }
}

/// CollectInheritedProtocols - Collect all protocols in current class and
/// those inherited by it.
void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
                          llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
    // We can use protocol_iterator here instead of
    // all_referenced_protocol_iterator since we are walking all categories.
    for (auto *Proto : OI->all_referenced_protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }

    // Categories of this Interface.
    for (const auto *Cat : OI->visible_categories())
      CollectInheritedProtocols(Cat, Protocols);

    if (ObjCInterfaceDecl *SD = OI->getSuperClass())
      while (SD) {
        CollectInheritedProtocols(SD, Protocols);
        SD = SD->getSuperClass();
      }
  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
    for (auto *Proto : OC->protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }
  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
    // Insert the protocol.
    if (!Protocols.insert(
          const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
      return;

    for (auto *Proto : OP->protocols())
      CollectInheritedProtocols(Proto, Protocols);
  }
}

static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
                                                const RecordDecl *RD,
                                                bool CheckIfTriviallyCopyable) {
  assert(RD->isUnion() && "Must be union type");
  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());

  for (const auto *Field : RD->fields()) {
    if (!Context.hasUniqueObjectRepresentations(Field->getType(),
                                                CheckIfTriviallyCopyable))
      return false;
    CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
    if (FieldSize != UnionSize)
      return false;
  }
  return !RD->field_empty();
}

static int64_t getSubobjectOffset(const FieldDecl *Field,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout & /*Layout*/) {
  return Context.getFieldOffset(Field);
}

static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout &Layout) {
  return Context.toBits(Layout.getBaseClassOffset(RD));
}

static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable);

static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  if (Field->getType()->isRecordType()) {
    const RecordDecl *RD = Field->getType()->getAsRecordDecl();
    if (!RD->isUnion())
      return structHasUniqueObjectRepresentations(Context, RD,
                                                  CheckIfTriviallyCopyable);
  }

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Field->getType(),
                                              CheckIfTriviallyCopyable))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(Context.getTypeSizeInChars(Field->getType()));
  if (Field->isBitField()) {
    // If we have explicit padding bits, they don't contribute bits
    // to the actual object representation, so return 0.
    if (Field->isUnnamedBitfield())
      return 0;

    int64_t BitfieldSize = Field->getBitWidthValue(Context);
    if (IsBitIntType) {
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      return std::nullopt;
    }
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
                                 Field->getType(), CheckIfTriviallyCopyable)) {
    return std::nullopt;
  }
  return FieldSizeInBits;
}

static std::optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  return structHasUniqueObjectRepresentations(Context, RD,
                                               CheckIfTriviallyCopyable);
}

template <typename RangeT>
static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
    const RangeT &Subobjects, int64_t CurOffsetInBits,
    const ASTContext &Context, const clang::ASTRecordLayout &Layout,
    bool CheckIfTriviallyCopyable) {
  for (const auto *Subobject : Subobjects) {
    std::optional<int64_t> SizeInBits =
        getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
    if (!SizeInBits)
      return std::nullopt;
    if (*SizeInBits != 0) {
      int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
      if (Offset != CurOffsetInBits)
        return std::nullopt;
      CurOffsetInBits += *SizeInBits;
    }
  }
  return CurOffsetInBits;
}

static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD,
                                     bool CheckIfTriviallyCopyable) {
  assert(!RD->isUnion() && "Must be struct/class type");
  const auto &Layout = Context.getASTRecordLayout(RD);

  int64_t CurOffsetInBits = 0;
  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
    if (ClassDecl->isDynamicClass())
      return std::nullopt;

    SmallVector<CXXRecordDecl *, 4> Bases;
    for (const auto &Base : ClassDecl->bases()) {
      // Empty types can be inherited from, and non-empty types can potentially
      // have tail padding, so just make sure there isn't an error.
      Bases.emplace_back(Base.getType()->getAsCXXRecordDecl());
    }

    llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
      return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
    });

    std::optional<int64_t> OffsetAfterBases =
        structSubobjectsHaveUniqueObjectRepresentations(
            Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
    if (!OffsetAfterBases)
      return std::nullopt;
    CurOffsetInBits = *OffsetAfterBases;
  }

  std::optional<int64_t> OffsetAfterFields =
      structSubobjectsHaveUniqueObjectRepresentations(
          RD->fields(), CurOffsetInBits, Context, Layout,
          CheckIfTriviallyCopyable);
  if (!OffsetAfterFields)
    return std::nullopt;
  CurOffsetInBits = *OffsetAfterFields;

  return CurOffsetInBits;
}

bool ASTContext::hasUniqueObjectRepresentations(
    QualType Ty, bool CheckIfTriviallyCopyable) const {
  // C++17 [meta.unary.prop]:
  //   The predicate condition for a template specialization
  //   has_unique_object_representations<T> shall be satisfied if and only if:
  //     (9.1) - T is trivially copyable, and
  //     (9.2) - any two objects of type T with the same value have the same
  //     object representation, where two objects of array or non-union class
  //     type are considered to have the same value if their respective
  //     sequences of direct subobjects have the same values, and two objects
  //     of union type are considered to have the same value if they have the
  //     same active member and the corresponding members have the same value.
  //   The set of scalar types for which this condition holds is
  //   implementation-defined. [ Note: If a type has padding bits, the
  //   condition does not hold; otherwise, the condition holds true for
  //   unsigned integral types. -- end note ]
  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");

  // Arrays are unique only if their element type is unique.
  if (Ty->isArrayType())
    return hasUniqueObjectRepresentations(getBaseElementType(Ty),
                                          CheckIfTriviallyCopyable);

  // (9.1) - T is trivially copyable...
  if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this))
    return false;

  // All integrals and enums are unique.
  if (Ty->isIntegralOrEnumerationType()) {
    // Except _BitInt types that have padding bits.
    if (const auto *BIT = Ty->getAs<BitIntType>())
      return getTypeSize(BIT) == BIT->getNumBits();

    return true;
  }

  // All other pointers are unique.
  if (Ty->isPointerType())
    return true;

  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return !ABI->getMemberPointerInfo(MPT).HasPadding;

  if (Ty->isRecordType()) {
    const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();

    if (Record->isInvalidDecl())
      return false;

    if (Record->isUnion())
      return unionHasUniqueObjectRepresentations(*this, Record,
                                                 CheckIfTriviallyCopyable);

    std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
        *this, Record, CheckIfTriviallyCopyable);

    return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
  }

  // FIXME: More cases to handle here (list by rsmith):
  // vectors (careful about, eg, vector of 3 foo)
  // _Complex int and friends
  // Obj-C block pointers
  // Obj-C object pointers
  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
  // clk_event_t, queue_t, reserve_id_t)
  // There're also Obj-C class types and the Obj-C selector type, but I think
  // it makes sense for those to return false here.

  return false;
}
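
// Illustrative example (typical 64-bit target with a 4-byte int): for
// 'struct P { char c; int i; };' the struct walk above sees that 'i' starts
// at bit 32 rather than right after 'c' at bit 8, so the padding makes the
// type not uniquely represented; 'struct Q { int a; int b; };' has 64 bits of
// contiguous fields in a 64-bit object, so it is reported as unique.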

unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
  unsigned count = 0;
  // Count ivars declared in class extension.
  for (const auto *Ext : OI->known_extensions())
    count += Ext->ivar_size();

  // Count ivar defined in this class's implementation. This
  // includes synthesized ivars.
  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
    count += ImplDecl->ivar_size();

  return count;
}

bool ASTContext::isSentinelNullExpr(const Expr *E) {
  if (!E)
    return false;

  // nullptr_t is always treated as null.
  if (E->getType()->isNullPtrType()) return true;

  if (E->getType()->isAnyPointerType() &&
      E->IgnoreParenCasts()->isNullPointerConstant(*this,
                                      Expr::NPC_ValueDependentIsNull))
    return true;

  // Unfortunately, __null has type 'int'.
  if (isa<GNUNullExpr>(E)) return true;

  return false;
}

/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
/// exists.
ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCImplementationDecl>(I->second);
  return nullptr;
}

/// Get the implementation of ObjCCategoryDecl, or nullptr if none
/// exists.
ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCCategoryImplDecl>(I->second);
  return nullptr;
}

/// Set the implementation of ObjCInterfaceDecl.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                                       ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  ObjCImpls[IFaceD] = ImplD;
}

/// Set the implementation of ObjCCategoryDecl.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                                       ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  ObjCImpls[CatD] = ImplD;
}

const ObjCMethodDecl *
ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
  return ObjCMethodRedecls.lookup(MD);
}

void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
  ObjCMethodRedecls[MD] = Redecl;
}

const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
                                              const NamedDecl *ND) const {
  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
    return ID;
  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
    return CD->getClassInterface();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
    return IMD->getClassInterface();

  return nullptr;
}

/// Get the copy initialization expression of VarDecl, or nullptr if
/// none exists.
BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
  assert(VD && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "getBlockVarCopyInits - not __block var");
  auto I = BlockVarCopyInits.find(VD);
  if (I != BlockVarCopyInits.end())
    return I->second;
  return {nullptr, false};
}

/// Set the copy initialization expression of a block var decl.
void ASTContext::setBlockVarCopyInit(const VarDecl *VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}

TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");

  auto *TInfo =
    (TypeSourceInfo *)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
  new (TInfo) TypeSourceInfo(T, DataSize);
  return TInfo;
}

TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
  return DI;
}

const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D, nullptr);
}

const ASTRecordLayout &
ASTContext::getASTObjCImplementationLayout(
                                        const ObjCImplementationDecl *D) const {
  return getObjCLayout(D->getClassInterface(), D);
}

static auto getCanonicalTemplateArguments(const ASTContext &C,
                                          ArrayRef<TemplateArgument> Args,
                                          bool &AnyNonCanonArgs) {
  SmallVector<TemplateArgument, 16> CanonArgs(Args);
  for (auto &Arg : CanonArgs) {
    TemplateArgument OrigArg = Arg;
    Arg = C.getCanonicalTemplateArgument(Arg);
    AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg);
  }
  return CanonArgs;
}

//===----------------------------------------------------------------------===//
//                   Type creation/memoization methods
//===----------------------------------------------------------------------===//

QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, baseType, quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(quals);
    canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);

    // Re-find the insert position.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
  }

  auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(eq, insertPos);
  return QualType(eq, fastQuals);
}
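
// Explanatory note (not part of the original source) on the "re-find the
// insert position" step above: the recursive getExtQualType call used to
// build the canonical node may itself insert into ExtQualNodes, which can
// invalidate the insertPos obtained from the first FindNodeOrInsertPos, so it
// has to be recomputed before InsertNode is called.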

QualType ASTContext::getAddrSpaceQualType(QualType T,
                                          LangAS AddressSpace) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(AddressSpace);

  return getExtQualType(TypeNode, Quals);
}

QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // directly.
  if (!T.hasAddressSpace())
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode;

  while (T.hasAddressSpace()) {
    TypeNode = Quals.strip(T);

    // If the type no longer has an address space after stripping qualifiers,
    // jump out.
    if (!QualType(TypeNode, 0).hasAddressSpace())
      break;

    // There might be sugar in the way. Strip it and try again.
    T = T.getSingleStepDesugaredType(*this);
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(TypeNode, Quals);

  return QualType(TypeNode, Quals.getFastQualifiers());
}

QualType ASTContext::getObjCGCQualType(QualType T,
                                       Qualifiers::GC GCAttr) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getObjCGCAttr() == GCAttr)
    return T;

  if (const auto *ptr = T->getAs<PointerType>()) {
    QualType Pointee = ptr->getPointeeType();
    if (Pointee->isAnyPointerType()) {
      QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
      return getPointerType(ResultType);
    }
  }

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an ObjCGC specified, it cannot get
  // another one.
  assert(!Quals.hasObjCGCAttr() &&
         "Type cannot have multiple ObjCGCs!");
  Quals.addObjCGCAttr(GCAttr);

  return getExtQualType(TypeNode, Quals);
}

QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
  if (const PointerType *Ptr = T->getAs<PointerType>()) {
    QualType Pointee = Ptr->getPointeeType();
    if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) {
      return getPointerType(removeAddrSpaceQualType(Pointee));
    }
  }
  return T;
}
*ASTContext::adjustFunctionType(const FunctionType
*T
,
3108 FunctionType::ExtInfo Info
) {
3109 if (T
->getExtInfo() == Info
)
3113 if (const auto *FNPT
= dyn_cast
<FunctionNoProtoType
>(T
)) {
3114 Result
= getFunctionNoProtoType(FNPT
->getReturnType(), Info
);
3116 const auto *FPT
= cast
<FunctionProtoType
>(T
);
3117 FunctionProtoType::ExtProtoInfo EPI
= FPT
->getExtProtoInfo();
3119 Result
= getFunctionType(FPT
->getReturnType(), FPT
->getParamTypes(), EPI
);
3122 return cast
<FunctionType
>(Result
.getTypePtr());

void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
                                                 QualType ResultType) {
  FD = FD->getMostRecentDecl();
  while (true) {
    const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
    if (FunctionDecl *Next = FD->getPreviousDecl())
      FD = Next;
    else
      break;
  }
  if (ASTMutationListener *L = getASTMutationListener())
    L->DeducedReturnType(FD, ResultType);
}

/// Get a function type and produce the equivalent function type with the
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
QualType ASTContext::getFunctionTypeWithExceptionSpec(
    QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
  // Might have some parens.
  if (const auto *PT = dyn_cast<ParenType>(Orig))
    return getParenType(
        getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));

  // Might be wrapped in a macro qualified type.
  if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
    return getMacroQualifiedType(
        getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
        MQT->getMacroIdentifier());

  // Might have a calling-convention attribute.
  if (const auto *AT = dyn_cast<AttributedType>(Orig))
    return getAttributedType(
        AT->getAttrKind(),
        getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
        getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));

  // Anything else must be a function type. Rebuild it with the new exception
  // specification.
  const auto *Proto = Orig->castAs<FunctionProtoType>();
  return getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI));
}

bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
                                                          QualType U) const {
  return hasSameType(T, U) ||
         (getLangOpts().CPlusPlus17 &&
          hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
                      getFunctionTypeWithExceptionSpec(U, EST_None)));
}

QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
  if (const auto *Proto = T->getAs<FunctionProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    SmallVector<QualType, 16> Args(Proto->param_types().size());
    for (unsigned i = 0, n = Args.size(); i != n; ++i)
      Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
    return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
  }

  if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
  }

  return T;
}

bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
  return hasSameType(T, U) ||
         hasSameType(getFunctionTypeWithoutPtrSizes(T),
                     getFunctionTypeWithoutPtrSizes(U));
}

void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}

/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(New);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(AdjustedType))
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(Decayed);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
QualType ASTContext::getDecayedType(QualType T) const {
  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");

  QualType Decayed;

  // C99 6.7.5.3p7:
  //   A declaration of a parameter as "array of type" shall be
  //   adjusted to "qualified pointer to type", where the type
  //   qualifiers (if any) are those specified within the [ and ] of
  //   the array type derivation.
  if (T->isArrayType())
    Decayed = getArrayDecayedType(T);

  // C99 6.7.5.3p8:
  //   A declaration of a parameter as "function returning type"
  //   shall be adjusted to "pointer to function returning type", as
  //   in 6.3.2.1.
  if (T->isFunctionType())
    Decayed = getPointerType(T);

  return getDecayedType(T, Decayed);
}
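
// Illustrative sketch (assumed caller code, not part of this file): both
// array-to-pointer and function-to-pointer decay go through this entry point:
//
//   QualType Decayed = Ctx.getDecayedType(SomeArrayOrFunctionTy);
//   const auto *DT = Decayed->getAs<DecayedType>();
//   QualType Orig = DT->getOriginalType();   // spelling before decay
//   QualType Ptr  = DT->getDecayedType();    // the pointer it adjusts to
//
// 'Ctx' and 'SomeArrayOrFunctionTy' are assumed names.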
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    BlockPointerType *NewIP =
      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New =
      new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    LValueReferenceType *NewIP =
      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(LValueReferenceType))
      LValueReferenceType(T, Canonical, SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);

  return QualType(New, 0);
}
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(RValueReferenceType))
      RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));

    // Get the new insert position for the node we care about.
    MemberPointerType *NewIP =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
                             IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    ConstantArrayType *NewIP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  void *Mem = Allocate(
      ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
      alignof(ConstantArrayType));
  auto *New = new (Mem)
    ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
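
// Illustrative sketch (assumed caller, hypothetical values): building the
// type 'int[10]' through the getter above:
//
//   llvm::APInt Size(/*numBits=*/64, /*val=*/10);
//   QualType IntArr10 = Ctx.getConstantArrayType(
//       Ctx.IntTy, Size, /*SizeExpr=*/nullptr, ArraySizeModifier::Normal,
//       /*IndexTypeQuals=*/0);
//
// The size is re-canonicalized to the target's max pointer width, so the bit
// width passed in does not need to match exactly. 'Ctx' is an assumed
// ASTContext reference.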
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::BitInt:
  case Type::DependentBitInt:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified.  All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(getVariableArrayDecayedType(
                              cast<PointerType>(ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(ty);
    result = getLValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()),
                                    lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(ty);
    result = getRValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(ty);
    result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(ty);
    result = getConstantArrayType(
                 getVariableArrayDecayedType(cat->getElementType()),
                                  cat->getSize(),
                                  cat->getSizeExpr(),
                                  cat->getSizeModifier(),
                                  cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(ty);
    result = getDependentSizedArrayType(
                 getVariableArrayDecayedType(dat->getElementType()),
                                        dat->getSizeExpr(),
                                        dat->getSizeModifier(),
                                        dat->getIndexTypeCVRQualifiers(),
                                        dat->getBracketsRange());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(ty);
    result =
        getVariableArrayType(getVariableArrayDecayedType(iat->getElementType()),
                             /*size*/ nullptr, ArraySizeModifier::Normal,
                             iat->getIndexTypeCVRQualifiers(), SourceRange());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(vat->getElementType()),
                 /*size*/ nullptr, ArraySizeModifier::Star,
                 vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(result, split.Quals);
}
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals,
                                          SourceRange Brackets) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type.
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals, Brackets);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
  }

  auto *New = new (*this, alignof(VariableArrayType))
      VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);

  VariableArrayTypes.push_back(New);
  Types.push_back(New);
  return QualType(New, 0);
}
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType ASTContext::getDependentSizedArrayType(QualType elementType,
                                                Expr *numElements,
                                                ArraySizeModifier ASM,
                                                unsigned elementTypeQuals,
                                                SourceRange brackets) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.  We do no canonicalization here at all, which is okay
  // because they can't be used in most locations.
  if (!numElements) {
    auto *newType = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(elementType, QualType(), numElements, ASM,
                                elementTypeQuals, brackets);
    Types.push_back(newType);
    return QualType(newType, 0);
  }

  // Otherwise, we actually build a new type every time, but we
  // also build a canonical type.

  SplitQualType canonElementType = getCanonicalType(elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentSizedArrayType::Profile(ID, *this,
                                   QualType(canonElementType.Ty, 0),
                                   ASM, elementTypeQuals, numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
    DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals, brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy,0),
                                    canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}
QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
       IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  We also have to pull
  // qualifiers off the element type.
  QualType canon;

  // FIXME: Check below should look for qualifiers behind sugar.
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, alignof(IncompleteArrayType))
      IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}
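
// Illustrative sketch (assumed caller): an incomplete array such as the type
// of 'extern int X[];' can be formed with:
//
//   QualType IncArr = Ctx.getIncompleteArrayType(Ctx.IntTy,
//                                                ArraySizeModifier::Normal,
//                                                /*IndexTypeQuals=*/0);
//
// Qualified element types are handled by splitting the qualifiers off and
// re-applying them, as the code above shows. 'Ctx' is an assumed name.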
ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS)                          \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS)                                     \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");
  case BuiltinType::SveInt8:
    return SVE_INT_ELTTY(8, 16, true, 1);
  case BuiltinType::SveUint8:
    return SVE_INT_ELTTY(8, 16, false, 1);
  case BuiltinType::SveInt8x2:
    return SVE_INT_ELTTY(8, 16, true, 2);
  case BuiltinType::SveUint8x2:
    return SVE_INT_ELTTY(8, 16, false, 2);
  case BuiltinType::SveInt8x3:
    return SVE_INT_ELTTY(8, 16, true, 3);
  case BuiltinType::SveUint8x3:
    return SVE_INT_ELTTY(8, 16, false, 3);
  case BuiltinType::SveInt8x4:
    return SVE_INT_ELTTY(8, 16, true, 4);
  case BuiltinType::SveUint8x4:
    return SVE_INT_ELTTY(8, 16, false, 4);
  case BuiltinType::SveInt16:
    return SVE_INT_ELTTY(16, 8, true, 1);
  case BuiltinType::SveUint16:
    return SVE_INT_ELTTY(16, 8, false, 1);
  case BuiltinType::SveInt16x2:
    return SVE_INT_ELTTY(16, 8, true, 2);
  case BuiltinType::SveUint16x2:
    return SVE_INT_ELTTY(16, 8, false, 2);
  case BuiltinType::SveInt16x3:
    return SVE_INT_ELTTY(16, 8, true, 3);
  case BuiltinType::SveUint16x3:
    return SVE_INT_ELTTY(16, 8, false, 3);
  case BuiltinType::SveInt16x4:
    return SVE_INT_ELTTY(16, 8, true, 4);
  case BuiltinType::SveUint16x4:
    return SVE_INT_ELTTY(16, 8, false, 4);
  case BuiltinType::SveInt32:
    return SVE_INT_ELTTY(32, 4, true, 1);
  case BuiltinType::SveUint32:
    return SVE_INT_ELTTY(32, 4, false, 1);
  case BuiltinType::SveInt32x2:
    return SVE_INT_ELTTY(32, 4, true, 2);
  case BuiltinType::SveUint32x2:
    return SVE_INT_ELTTY(32, 4, false, 2);
  case BuiltinType::SveInt32x3:
    return SVE_INT_ELTTY(32, 4, true, 3);
  case BuiltinType::SveUint32x3:
    return SVE_INT_ELTTY(32, 4, false, 3);
  case BuiltinType::SveInt32x4:
    return SVE_INT_ELTTY(32, 4, true, 4);
  case BuiltinType::SveUint32x4:
    return SVE_INT_ELTTY(32, 4, false, 4);
  case BuiltinType::SveInt64:
    return SVE_INT_ELTTY(64, 2, true, 1);
  case BuiltinType::SveUint64:
    return SVE_INT_ELTTY(64, 2, false, 1);
  case BuiltinType::SveInt64x2:
    return SVE_INT_ELTTY(64, 2, true, 2);
  case BuiltinType::SveUint64x2:
    return SVE_INT_ELTTY(64, 2, false, 2);
  case BuiltinType::SveInt64x3:
    return SVE_INT_ELTTY(64, 2, true, 3);
  case BuiltinType::SveUint64x3:
    return SVE_INT_ELTTY(64, 2, false, 3);
  case BuiltinType::SveInt64x4:
    return SVE_INT_ELTTY(64, 2, true, 4);
  case BuiltinType::SveUint64x4:
    return SVE_INT_ELTTY(64, 2, false, 4);
  case BuiltinType::SveBool:
    return SVE_ELTTY(BoolTy, 16, 1);
  case BuiltinType::SveBoolx2:
    return SVE_ELTTY(BoolTy, 16, 2);
  case BuiltinType::SveBoolx4:
    return SVE_ELTTY(BoolTy, 16, 4);
  case BuiltinType::SveFloat16:
    return SVE_ELTTY(HalfTy, 8, 1);
  case BuiltinType::SveFloat16x2:
    return SVE_ELTTY(HalfTy, 8, 2);
  case BuiltinType::SveFloat16x3:
    return SVE_ELTTY(HalfTy, 8, 3);
  case BuiltinType::SveFloat16x4:
    return SVE_ELTTY(HalfTy, 8, 4);
  case BuiltinType::SveFloat32:
    return SVE_ELTTY(FloatTy, 4, 1);
  case BuiltinType::SveFloat32x2:
    return SVE_ELTTY(FloatTy, 4, 2);
  case BuiltinType::SveFloat32x3:
    return SVE_ELTTY(FloatTy, 4, 3);
  case BuiltinType::SveFloat32x4:
    return SVE_ELTTY(FloatTy, 4, 4);
  case BuiltinType::SveFloat64:
    return SVE_ELTTY(DoubleTy, 2, 1);
  case BuiltinType::SveFloat64x2:
    return SVE_ELTTY(DoubleTy, 2, 2);
  case BuiltinType::SveFloat64x3:
    return SVE_ELTTY(DoubleTy, 2, 3);
  case BuiltinType::SveFloat64x4:
    return SVE_ELTTY(DoubleTy, 2, 4);
  case BuiltinType::SveBFloat16:
    return SVE_ELTTY(BFloat16Ty, 8, 1);
  case BuiltinType::SveBFloat16x2:
    return SVE_ELTTY(BFloat16Ty, 8, 2);
  case BuiltinType::SveBFloat16x3:
    return SVE_ELTTY(BFloat16Ty, 8, 3);
  case BuiltinType::SveBFloat16x4:
    return SVE_ELTTY(BFloat16Ty, 8, 4);
#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,         \
                            IsSigned)                                          \
  case BuiltinType::Id:                                                        \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                           \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)       \
  case BuiltinType::Id:                                                        \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),    \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)      \
  case BuiltinType::Id:                                                        \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  case BuiltinType::Id:                                                        \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}
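
// Illustrative sketch (assumed caller): the returned BuiltinVectorTypeInfo
// describes element type, scalable element count, and tuple size; for an SVE
// 'svint32x2_t'-style builtin one would expect roughly:
//
//   ASTContext::BuiltinVectorTypeInfo Info =
//       Ctx.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
//   // Info.ElementType == Ctx.IntTy (32-bit signed)
//   // Info.EC          == llvm::ElementCount::getScalable(4)
//   // Info.NumVectors  == 2
//
// 'Ctx' and 'Ty' are assumed names.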
/// getExternrefType - Return a WebAssembly externref type, which represents an
/// opaque reference to a host value.
QualType ASTContext::getWebAssemblyExternrefType() const {
  if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) {
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  if (BuiltinType::Id == BuiltinType::WasmExternRef)                          \
    return SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }
  llvm_unreachable(
      "shouldn't try to generate type externref outside WebAssembly target");
}
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
                                           unsigned NumFields) const {
  if (Target->hasAArch64SVETypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,    \
                        IsSigned, IsFP, IsBF)                                  \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls) {                              \
    return SingletonId;                                                        \
  }
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)         \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return SingletonId;
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,   \
                        IsFP, IsBF)                                            \
  if (!EltTy->isBooleanType() &&                                               \
      ((EltTy->hasIntegerRepresentation() &&                                   \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&      \
        IsFP && !IsBF) ||                                                      \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&       \
        IsBF && !IsFP)) &&                                                     \
      EltTySize == ElBits && NumElts == NumEls && NumFields == NF)             \
    return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
  if (EltTy->isBooleanType() && NumElts == NumEls)                             \
    return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  return QualType();
}
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
                                   VectorKind VecKind) const {
  assert(vecType->isBuiltinType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) &&
          vecType->getAs<BitIntType>()->getNumBits() >= 8));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);

  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);

    // Get the new insert position for the node we care about.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(VectorType))
      VectorType(vecType, NumElts, Canonical, VecKind);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
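
// Illustrative sketch (assumed caller): a GCC-style 'vector_size' type such as
// 'float __attribute__((vector_size(16)))' (4 x float) could be requested as:
//
//   QualType V4F = Ctx.getVectorType(Ctx.FloatTy, /*NumElts=*/4,
//                                    VectorKind::Generic);
//
// The FoldingSet lookup above guarantees a single VectorType node per
// (element type, length, kind) triple. 'Ctx' is an assumed name.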
QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                            SourceLocation AttrLoc,
                                            VectorKind VecKind) const {
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    New = new (*this, alignof(DependentVectorType)) DependentVectorType(
        VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(VecType);
    if (CanonVecTy == VecType) {
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(New, InsertPos);
    } else {
      QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
                                                SourceLocation(), VecKind);
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}
/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getExtVectorType(QualType vecType,
                                      unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
          vecType->castAs<BitIntType>()->getNumBits() >= 8));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
                      VectorKind::Generic);
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);

    // Get the new insert position for the node we care about.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ExtVectorType))
      ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
                                       SizeExpr);

  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, alignof(DependentSizedExtVectorType))
        DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
                                    AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(vecType);
    if (CanonVecTy == vecType) {
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
    } else {
      QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
                                                           SourceLocation());
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}
QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
                                           unsigned NumColumns) const {
  llvm::FoldingSetNodeID ID;
  ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
                              Type::ConstantMatrix);

  assert(MatrixType::isValidElementType(ElementTy) &&
         "need a valid element type");
  assert(ConstantMatrixType::isDimensionValid(NumRows) &&
         ConstantMatrixType::isDimensionValid(NumColumns) &&
         "need valid matrix dimensions");
  void *InsertPos = nullptr;
  if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(MTP, 0);

  QualType Canonical;
  if (!ElementTy.isCanonical()) {
    Canonical =
        getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);

    ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Matrix type shouldn't already exist in the map");
    (void)NewIP;
  }

  auto *New = new (*this, alignof(ConstantMatrixType))
      ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
  MatrixTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
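
// Illustrative sketch (assumed caller): the matrix extension type
// 'float __attribute__((matrix_type(3, 4)))' corresponds to:
//
//   QualType M3x4F = Ctx.getConstantMatrixType(Ctx.FloatTy, /*NumRows=*/3,
//                                              /*NumColumns=*/4);
//
// The element type and dimensions are validated by the asserts above before
// the node is uniqued. 'Ctx' is an assumed name.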
QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
                                                 Expr *RowExpr,
                                                 Expr *ColumnExpr,
                                                 SourceLocation AttrLoc) const {
  QualType CanonElementTy = getCanonicalType(ElementTy);
  llvm::FoldingSetNodeID ID;
  DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
                                    ColumnExpr);

  void *InsertPos = nullptr;
  DependentSizedMatrixType *Canon =
      DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, alignof(DependentSizedMatrixType))
        DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
                                 ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // Already have a canonical version of the matrix type
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getRowExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Use Canon as the canonical type for newly-built type.
  DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
      DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
                               ColumnExpr, AttrLoc);
  Types.push_back(New);
  return QualType(New, 0);
}
QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
    DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  if (!canonTy) {
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}
/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++).  Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  QualType Canonical;
  if (!isCanonicalResultType(ResultTy)) {
    Canonical =
      getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);

    // Get the new insert position for the node we care about.
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(FunctionNoProtoType))
      FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(New);
  FunctionNoProtoTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
CanQualType
ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
  CanQualType CanResultType = getCanonicalType(ResultType);

  // Canonical result types do not have ARC lifetime qualifiers.
  if (CanResultType.getQualifiers().hasObjCLifetime()) {
    Qualifiers Qs = CanResultType.getQualifiers();
    Qs.removeObjCLifetime();
    return CanQualType::CreateUnsafe(
             getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
  }

  return CanResultType;
}
static bool isCanonicalExceptionSpecification(
    const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
  if (ESI.Type == EST_None)
    return true;
  if (!NoexceptInType)
    return false;

  // C++17 onwards: exception specification is part of the type, as a simple
  // boolean "can this function type throw".
  if (ESI.Type == EST_BasicNoexcept)
    return true;

  // A noexcept(expr) specification is (possibly) canonical if expr is
  // value-dependent.
  if (ESI.Type == EST_DependentNoexcept)
    return true;

  // A dynamic exception specification is canonical if it only contains pack
  // expansions (so we can't tell whether it's non-throwing) and all its
  // contained types are canonical.
  if (ESI.Type == EST_Dynamic) {
    bool AnyPackExpansions = false;
    for (QualType ET : ESI.Exceptions) {
      if (!ET.isCanonical())
        return false;
      if (ET->getAs<PackExpansionType>())
        AnyPackExpansions = true;
    }
    return AnyPackExpansions;
  }

  return false;
}
QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
                             *this, true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
    // it so long as our exception specification doesn't contain a dependent
    // noexcept expression, or we're just looking for a canonical type.
    // Otherwise, we're going to need to create a type
    // sugar node to hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

        // A dynamic exception specification is almost always "not noexcept",
        // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(getCanonicalType(ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
    Canonical =
        getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);

    // Get the new insert position for the node we care about.
    FunctionProtoType *NewIP =
      FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
      FunctionType::ExceptionType, Expr *, FunctionDecl *,
      FunctionProtoType::ExtParameterInfo, Qualifiers>(
      NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(),
      ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
      EPI.ExtParameterInfos ? NumArgs : 0,
      EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);

  auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType));
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(FTP);
  if (!Unique)
    FunctionProtoTypes.InsertNode(FTP, InsertPos);
  return QualType(FTP, 0);
}
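
// Illustrative sketch (assumed caller): a prototype like 'int(float, double)'
// is normally built through the public getFunctionType wrapper, which funnels
// into getFunctionTypeInternal above:
//
//   QualType Params[] = {Ctx.FloatTy, Ctx.DoubleTy};
//   FunctionProtoType::ExtProtoInfo EPI;
//   QualType Fn = Ctx.getFunctionType(Ctx.IntTy, Params, EPI);
//
// 'Ctx' is an assumed ASTContext reference; EPI's defaults describe a
// non-variadic prototype with no exception specification.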
QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
  llvm::FoldingSetNodeID ID;
  PipeType::Profile(ID, T, ReadOnly);

  void *InsertPos = nullptr;
  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pipe element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPipeType(getCanonicalType(T), ReadOnly);

    // Get the new insert position for the node we care about.
    PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
  Types.push_back(New);
  PipeTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
                         : Ty;
}
QualType ASTContext::getReadPipeType(QualType T) const {
  return getPipeType(T, true);
}

QualType ASTContext::getWritePipeType(QualType T) const {
  return getPipeType(T, false);
}
QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
  llvm::FoldingSetNodeID ID;
  BitIntType::Profile(ID, IsUnsigned, NumBits);

  void *InsertPos = nullptr;
  if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(EIT, 0);

  auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
  BitIntTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
                                            Expr *NumBitsExpr) const {
  assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
  llvm::FoldingSetNodeID ID;
  DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);

  void *InsertPos = nullptr;
  if (DependentBitIntType *Existing =
        DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);

  auto *New = new (*this, alignof(DependentBitIntType))
      DependentBitIntType(IsUnsigned, NumBitsExpr);
  DependentBitIntTypes.InsertNode(New, InsertPos);

  Types.push_back(New);
  return QualType(New, 0);
}
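
// Illustrative sketch (assumed caller): the C23 type '_BitInt(24)' and its
// unsigned counterpart are produced by:
//
//   QualType S24 = Ctx.getBitIntType(/*IsUnsigned=*/false, /*NumBits=*/24);
//   QualType U24 = Ctx.getBitIntType(/*IsUnsigned=*/true, /*NumBits=*/24);
//
// Dependent widths (e.g. '_BitInt(N)' inside a template) go through
// getDependentBitIntType instead, as above. 'Ctx' is an assumed name.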
#ifndef NDEBUG
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
  if (!isa<CXXRecordDecl>(D)) return false;
  const auto *RD = cast<CXXRecordDecl>(D);
  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
    return true;
  if (RD->getDescribedClassTemplate() &&
      !isa<ClassTemplateSpecializationDecl>(RD))
    return true;
  return false;
}
#endif
/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
                                              QualType TST) const {
  assert(NeedsInjectedClassNameType(Decl));
  if (Decl->TypeForDecl) {
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
    assert(PrevDecl->TypeForDecl && "previous declaration has no type");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else {
    Type *newType = new (*this, alignof(InjectedClassNameType))
        InjectedClassNameType(Decl, TST);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  }
  return QualType(Decl->TypeForDecl, 0);
}
/// getTypeDeclType - Return the unique reference to the type for the
/// specified type declaration.
QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
  assert(Decl && "Passed null for Decl param");
  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");

  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
    return getTypedefType(Typedef);

  assert(!isa<TemplateTypeParmDecl>(Decl) &&
         "Template type parameter types are always available.");

  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
    assert(Record->isFirstDecl() && "struct/union has previous declaration");
    assert(!NeedsInjectedClassNameType(Record));
    return getRecordType(Record);
  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
    assert(Enum->isFirstDecl() && "enum has previous declaration");
    return getEnumType(Enum);
  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
    return getUnresolvedUsingType(Using);
  } else
    llvm_unreachable("TypeDecl without a type?");

  return QualType(Decl->TypeForDecl, 0);
}
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                                    QualType Underlying) const {
  if (!Decl->TypeForDecl) {
    if (Underlying.isNull())
      Underlying = Decl->getUnderlyingType();
    auto *NewType = new (*this, alignof(TypedefType)) TypedefType(
        Type::Typedef, Decl, QualType(), getCanonicalType(Underlying));
    Decl->TypeForDecl = NewType;
    Types.push_back(NewType);
    return QualType(NewType, 0);
  }
  if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
    return QualType(Decl->TypeForDecl, 0);
  assert(hasSameType(Decl->getUnderlyingType(), Underlying));

  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Decl, Underlying);

  void *InsertPos = nullptr;
  if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    assert(!T->typeMatchesDecl() &&
           "non-divergent case should be handled with TypeDecl");
    return QualType(T, 0);
  }

  void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true),
                       alignof(TypedefType));
  auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
                                        getCanonicalType(Underlying));
  TypedefTypes.InsertNode(NewType, InsertPos);
  Types.push_back(NewType);
  return QualType(NewType, 0);
}
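
// Illustrative sketch (assumed caller): given a TypedefNameDecl 'TD' for
// 'typedef int MyInt;', the corresponding sugar type is obtained with:
//
//   QualType T = Ctx.getTypedefType(TD);
//   // T prints as 'MyInt'; T.getCanonicalType() is 'int'.
//
// Passing an explicit 'Underlying' type is only needed for the divergent
// case handled by the FoldingSet path above; 'Ctx' and 'TD' are assumed.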
QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
                                  QualType Underlying) const {
  llvm::FoldingSetNodeID ID;
  UsingType::Profile(ID, Found, Underlying);

  void *InsertPos = nullptr;
  if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  const Type *TypeForDecl =
      cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();

  assert(!Underlying.hasLocalQualifiers());
  QualType Canon = Underlying->getCanonicalTypeInternal();
  assert(TypeForDecl->getCanonicalTypeInternal() == Canon);

  if (Underlying.getTypePtr() == TypeForDecl)
    Underlying = QualType();
  void *Mem =
      Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
               alignof(UsingType));
  UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
  Types.push_back(NewType);
  UsingTypes.InsertNode(NewType, InsertPos);
  return QualType(NewType, 0);
}
QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, alignof(RecordType)) RecordType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}
QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, alignof(EnumType)) EnumType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}
QualType ASTContext::getUnresolvedUsingType(
    const UnresolvedUsingTypenameDecl *Decl) const {
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
          Decl->getCanonicalDecl())
    if (CanonicalDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);

  Type *newType =
      new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}
QualType ASTContext::getAttributedType(attr::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType) const {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);

  void *insertPos = nullptr;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, alignof(AttributedType))
      AttributedType(canon, attrKind, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}
QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
                                             QualType Wrapped) {
  llvm::FoldingSetNodeID ID;
  BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);

  void *InsertPos = nullptr;
  BTFTagAttributedType *Ty =
      BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Ty)
    return QualType(Ty, 0);

  QualType Canon = getCanonicalType(Wrapped);
  Ty = new (*this, alignof(BTFTagAttributedType))
      BTFTagAttributedType(Canon, Wrapped, BTFAttr);

  Types.push_back(Ty);
  BTFTagAttributedTypes.InsertNode(Ty, InsertPos);

  return QualType(Ty, 0);
}
/// Retrieve a substitution-result type.
QualType ASTContext::getSubstTemplateTypeParmType(
    QualType Replacement, Decl *AssociatedDecl, unsigned Index,
    std::optional<unsigned> PackIndex) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
                                     PackIndex);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm =
      SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
                             !Replacement.isCanonical()),
                         alignof(SubstTemplateTypeParmType));
    SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
                                                    Index, PackIndex);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}
QualType
ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
                                             unsigned Index, bool Final,
                                             const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements())
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
                                         ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  QualType Canon;
  {
    TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack);
    if (!AssociatedDecl->isCanonicalDecl() ||
        !CanonArgPack.structurallyEquals(ArgPack)) {
      Canon = getSubstTemplateTypeParmPackType(
          AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack);
      [[maybe_unused]] const auto *Nothing =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!Nothing);
    }
  }

  auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
      SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
                                    ArgPack);
  Types.push_back(SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(TTPDecl, Canon);

    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}
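// Illustrative sketch, not part of the original source: canonical template
// type parameters are uniqued purely by (depth, index, pack-ness); a request
// that omits the declaration pointer always returns the same canonical node.
// Assumes a hypothetical ASTContext &Ctx:
//
//   QualType P = Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
//                                            /*ParameterPack=*/false);
//   assert(P == Ctx.getTemplateTypeParmType(0, 0, false));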
TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                              const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");

  QualType TST =
      getTemplateSpecializationType(Name, Args.arguments(), Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgumentLoc> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  SmallVector<TemplateArgument, 4> ArgVec;
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args)
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}
static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
  for (const TemplateArgument &Arg : Args)
    if (Arg.isPackExpansion())
      return true;

  return false;
}
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgument> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = QTN->getUnderlyingTemplate();

  const auto *TD = Template.getAsTemplateDecl();
  bool IsTypeAlias = TD && TD->isTypeAlias();
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
    assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
           "Caller must compute aliased type");
    IsTypeAlias = false;
    CanonType = getCanonicalTemplateSpecializationType(Template, Args);
  }

  // Allocate the (non-canonical) template specialization type, but don't
  // try to unique it: these types typically have location information that
  // we don't unique and don't want to lose.
  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * Args.size() +
                           (IsTypeAlias ? sizeof(QualType) : 0),
                       alignof(TemplateSpecializationType));
  auto *Spec
      = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
                                             IsTypeAlias ? Underlying : QualType());

  Types.push_back(Spec);
  return QualType(Spec, 0);
}
QualType ASTContext::getCanonicalTemplateSpecializationType(
    TemplateName Template, ArrayRef<TemplateArgument> Args) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = TemplateName(QTN->getUnderlyingTemplate());

  // Build the canonical template specialization type.
  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
  bool AnyNonCanonArgs = false;
  auto CanonArgs =
      ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);

  // Determine whether this canonical template specialization type already
  // exists.
  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, CanonTemplate,
                                      CanonArgs, *this);

  void *InsertPos = nullptr;
  TemplateSpecializationType *Spec
    = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Spec) {
    // Allocate a new canonical template specialization type.
    void *Mem = Allocate((sizeof(TemplateSpecializationType) +
                          sizeof(TemplateArgument) * CanonArgs.size()),
                         alignof(TemplateSpecializationType));
    Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
                                                CanonArgs,
                                                QualType(), QualType());
    Types.push_back(Spec);
    TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
  }

  assert(Spec->isDependentType() &&
         "Non-dependent template-id type must have a canonical type");
  return QualType(Spec, 0);
}
QualType
ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
                              NestedNameSpecifier *NNS,
                              QualType NamedType,
                              TagDecl *OwnedTagDecl) const {
  llvm::FoldingSetNodeID ID;
  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);

  void *InsertPos = nullptr;
  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon = NamedType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(NamedType);
    ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Elaborated canonical type broken");
    (void)CheckT;
  }

  void *Mem =
      Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
               alignof(ElaboratedType));
  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);

  Types.push_back(T);
  ElaboratedTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
QualType
ASTContext::getParenType(QualType InnerType) const {
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, InnerType);

  void *InsertPos = nullptr;
  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(InnerType);
    ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Paren canonical type broken");
    (void)CheckT;
  }

  T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
  Types.push_back(T);
  ParenTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
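// Illustrative sketch, not part of the original source: ParenType is pure
// sugar, so it shares a canonical type with its inner type while remaining a
// distinct node. Assumes a hypothetical ASTContext &Ctx:
//
//   QualType PT = Ctx.getParenType(Ctx.IntTy);
//   assert(Ctx.hasSameType(PT, Ctx.IntTy) && "sugar only, no semantic change");
//   assert(PT.getTypePtr() != Ctx.IntTy.getTypePtr() && "distinct sugar node");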
QualType
ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
                                  const IdentifierInfo *MacroII) const {
  QualType Canon = UnderlyingTy;
  if (!Canon.isCanonical())
    Canon = getCanonicalType(UnderlyingTy);

  auto *newType = new (*this, alignof(MacroQualifiedType))
      MacroQualifiedType(UnderlyingTy, Canon, MacroII);
  Types.push_back(newType);
  return QualType(newType, 0);
}
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifier *NNS,
                                          const IdentifierInfo *Name,
                                          QualType Canon) const {
  if (Canon.isNull()) {
    NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
    if (CanonNNS != NNS)
      Canon = getDependentNameType(Keyword, CanonNNS, Name);
  }

  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);

  void *InsertPos = nullptr;
  DependentNameType *T
    = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  T = new (*this, alignof(DependentNameType))
      DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(T);
  DependentNameTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
QualType ASTContext::getDependentTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
    const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const {
  // TODO: avoid this copy
  SmallVector<TemplateArgument, 16> ArgCopy;
  for (unsigned I = 0, E = Args.size(); I != E; ++I)
    ArgCopy.push_back(Args[I].getArgument());
  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
}
QualType
ASTContext::getDependentTemplateSpecializationType(
    ElaboratedTypeKeyword Keyword,
    NestedNameSpecifier *NNS,
    const IdentifierInfo *Name,
    ArrayRef<TemplateArgument> Args) const {
  assert((!NNS || NNS->isDependent()) &&
         "nested-name-specifier must be dependent");

  llvm::FoldingSetNodeID ID;
  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
                                               Name, Args);

  void *InsertPos = nullptr;
  DependentTemplateSpecializationType *T
    = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);

  ElaboratedTypeKeyword CanonKeyword = Keyword;
  if (Keyword == ElaboratedTypeKeyword::None)
    CanonKeyword = ElaboratedTypeKeyword::Typename;

  bool AnyNonCanonArgs = false;
  auto CanonArgs =
      ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);

  QualType Canon;
  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
    Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
                                                   Name, CanonArgs);

    // Find the insert position again.
    [[maybe_unused]] auto *Nothing =
        DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!Nothing && "canonical type broken");
  }

  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
                        sizeof(TemplateArgument) * Args.size()),
                       alignof(DependentTemplateSpecializationType));
  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
                                                    Name, Args, Canon);
  Types.push_back(T);
  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
  TemplateArgument Arg;
  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
    QualType ArgType = getTypeDeclType(TTP);
    if (TTP->isParameterPack())
      ArgType = getPackExpansionType(ArgType, std::nullopt);

    Arg = TemplateArgument(ArgType);
  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
    QualType T =
        NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
    // For class NTTPs, ensure we include the 'const' so the type matches that
    // of a real template argument.
    // FIXME: It would be more faithful to model this as something like an
    // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
    if (T->isRecordType())
      T.addConst();
    Expr *E = new (*this) DeclRefExpr(
        *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T,
        Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());

    if (NTTP->isParameterPack())
      E = new (*this)
          PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt);
    Arg = TemplateArgument(E);
  } else {
    auto *TTP = cast<TemplateTemplateParmDecl>(Param);
    if (TTP->isParameterPack())
      Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>());
    else
      Arg = TemplateArgument(TemplateName(TTP));
  }

  if (Param->isTemplateParameterPack())
    Arg = TemplateArgument::CreatePackCopy(*this, Arg);

  return Arg;
}
void
ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
                                    SmallVectorImpl<TemplateArgument> &Args) {
  Args.reserve(Args.size() + Params->size());

  for (NamedDecl *Param : *Params)
    Args.push_back(getInjectedTemplateArg(Param));
}
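// Illustrative sketch, not part of the original source: given a hypothetical
// ClassTemplateDecl *CTD, this fills in one argument per parameter (packs come
// back wrapped as pack expansions), mirroring the injected-class-name's
// template arguments. Assumes an ASTContext &Ctx:
//
//   SmallVector<TemplateArgument, 4> Injected;
//   Ctx.getInjectedTemplateArgs(CTD->getTemplateParameters(), Injected);
//   assert(Injected.size() == CTD->getTemplateParameters()->size());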
QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          std::optional<unsigned> NumExpansions,
                                          bool ExpectPackInType) {
  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
         "Pack expansions must expand one or more parameter packs");

  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  void *InsertPos = nullptr;
  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon;
  if (!Pattern.isCanonical()) {
    Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions,
                                 /*ExpectPackInType=*/false);

    // Find the insert position again, in case we inserted an element into
    // PackExpansionTypes and invalidated our insert position.
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, alignof(PackExpansionType))
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(T);
  PackExpansionTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
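// Illustrative sketch, not part of the original source: the pattern handed to
// getPackExpansionType must mention an unexpanded pack unless the caller
// passes ExpectPackInType=false (as the canonicalization step above does).
// Assumes a hypothetical ASTContext &Ctx and a QualType PackT that contains an
// unexpanded parameter pack:
//
//   QualType Expansion = Ctx.getPackExpansionType(PackT, std::nullopt);
//   assert(Expansion->getAs<PackExpansionType>()->getPattern() == PackT);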
/// CmpProtocolNames - Comparison predicate for sorting protocols
/// alphabetically.
static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
                            ObjCProtocolDecl *const *RHS) {
  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
}

static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
  if (Protocols.empty()) return true;

  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
    return false;

  for (unsigned i = 1; i != Protocols.size(); ++i)
    if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
        Protocols[i]->getCanonicalDecl() != Protocols[i])
      return false;
  return true;
}

static void
SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
  // Sort protocols, keyed by name.
  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);

  // Canonicalize.
  for (ObjCProtocolDecl *&P : Protocols)
    P = P->getCanonicalDecl();

  // Remove duplicates.
  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
  Protocols.erase(ProtocolsEnd, Protocols.end());
}
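// Illustrative sketch, not part of the original source: after sorting and
// uniquing, a protocol list satisfies exactly the invariant that
// areSortedAndUniqued() checks. Assumes a hypothetical input range
// SomeProtocols of ObjCProtocolDecl pointers:
//
//   SmallVector<ObjCProtocolDecl *, 8> Protos(SomeProtocols.begin(),
//                                             SomeProtocols.end());
//   SortAndUniqueProtocols(Protos);
//   assert(areSortedAndUniqued(Protos));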
QualType ASTContext::getObjCObjectType(QualType BaseType,
                                       ObjCProtocolDecl * const *Protocols,
                                       unsigned NumProtocols) const {
  return getObjCObjectType(BaseType, {},
                           llvm::ArrayRef(Protocols, NumProtocols),
                           /*isKindOf=*/false);
}
5280 QualType
ASTContext::getObjCObjectType(
5282 ArrayRef
<QualType
> typeArgs
,
5283 ArrayRef
<ObjCProtocolDecl
*> protocols
,
5284 bool isKindOf
) const {
5285 // If the base type is an interface and there aren't any protocols or
5286 // type arguments to add, then the interface type will do just fine.
5287 if (typeArgs
.empty() && protocols
.empty() && !isKindOf
&&
5288 isa
<ObjCInterfaceType
>(baseType
))
5291 // Look in the folding set for an existing type.
5292 llvm::FoldingSetNodeID ID
;
5293 ObjCObjectTypeImpl::Profile(ID
, baseType
, typeArgs
, protocols
, isKindOf
);
5294 void *InsertPos
= nullptr;
5295 if (ObjCObjectType
*QT
= ObjCObjectTypes
.FindNodeOrInsertPos(ID
, InsertPos
))
5296 return QualType(QT
, 0);
5298 // Determine the type arguments to be used for canonicalization,
5299 // which may be explicitly specified here or written on the base
5301 ArrayRef
<QualType
> effectiveTypeArgs
= typeArgs
;
5302 if (effectiveTypeArgs
.empty()) {
5303 if (const auto *baseObject
= baseType
->getAs
<ObjCObjectType
>())
5304 effectiveTypeArgs
= baseObject
->getTypeArgs();
5307 // Build the canonical type, which has the canonical base type and a
5308 // sorted-and-uniqued list of protocols and the type arguments
5311 bool typeArgsAreCanonical
= llvm::all_of(
5312 effectiveTypeArgs
, [&](QualType type
) { return type
.isCanonical(); });
5313 bool protocolsSorted
= areSortedAndUniqued(protocols
);
5314 if (!typeArgsAreCanonical
|| !protocolsSorted
|| !baseType
.isCanonical()) {
5315 // Determine the canonical type arguments.
5316 ArrayRef
<QualType
> canonTypeArgs
;
5317 SmallVector
<QualType
, 4> canonTypeArgsVec
;
5318 if (!typeArgsAreCanonical
) {
5319 canonTypeArgsVec
.reserve(effectiveTypeArgs
.size());
5320 for (auto typeArg
: effectiveTypeArgs
)
5321 canonTypeArgsVec
.push_back(getCanonicalType(typeArg
));
5322 canonTypeArgs
= canonTypeArgsVec
;
5324 canonTypeArgs
= effectiveTypeArgs
;
5327 ArrayRef
<ObjCProtocolDecl
*> canonProtocols
;
5328 SmallVector
<ObjCProtocolDecl
*, 8> canonProtocolsVec
;
5329 if (!protocolsSorted
) {
5330 canonProtocolsVec
.append(protocols
.begin(), protocols
.end());
5331 SortAndUniqueProtocols(canonProtocolsVec
);
5332 canonProtocols
= canonProtocolsVec
;
5334 canonProtocols
= protocols
;
5337 canonical
= getObjCObjectType(getCanonicalType(baseType
), canonTypeArgs
,
5338 canonProtocols
, isKindOf
);
5340 // Regenerate InsertPos.
5341 ObjCObjectTypes
.FindNodeOrInsertPos(ID
, InsertPos
);
5344 unsigned size
= sizeof(ObjCObjectTypeImpl
);
5345 size
+= typeArgs
.size() * sizeof(QualType
);
5346 size
+= protocols
.size() * sizeof(ObjCProtocolDecl
*);
5347 void *mem
= Allocate(size
, alignof(ObjCObjectTypeImpl
));
5349 new (mem
) ObjCObjectTypeImpl(canonical
, baseType
, typeArgs
, protocols
,
5353 ObjCObjectTypes
.InsertNode(T
, InsertPos
);
5354 return QualType(T
, 0);
5357 /// Apply Objective-C protocol qualifiers to the given type.
5358 /// If this is for the canonical type of a type parameter, we can apply
5359 /// protocol qualifiers on the ObjCObjectPointerType.
5361 ASTContext::applyObjCProtocolQualifiers(QualType type
,
5362 ArrayRef
<ObjCProtocolDecl
*> protocols
, bool &hasError
,
5363 bool allowOnPointerType
) const {
5366 if (const auto *objT
= dyn_cast
<ObjCTypeParamType
>(type
.getTypePtr())) {
5367 return getObjCTypeParamType(objT
->getDecl(), protocols
);
5370 // Apply protocol qualifiers to ObjCObjectPointerType.
5371 if (allowOnPointerType
) {
5372 if (const auto *objPtr
=
5373 dyn_cast
<ObjCObjectPointerType
>(type
.getTypePtr())) {
5374 const ObjCObjectType
*objT
= objPtr
->getObjectType();
5375 // Merge protocol lists and construct ObjCObjectType.
5376 SmallVector
<ObjCProtocolDecl
*, 8> protocolsVec
;
5377 protocolsVec
.append(objT
->qual_begin(),
5379 protocolsVec
.append(protocols
.begin(), protocols
.end());
5380 ArrayRef
<ObjCProtocolDecl
*> protocols
= protocolsVec
;
5381 type
= getObjCObjectType(
5382 objT
->getBaseType(),
5383 objT
->getTypeArgsAsWritten(),
5385 objT
->isKindOfTypeAsWritten());
5386 return getObjCObjectPointerType(type
);
5390 // Apply protocol qualifiers to ObjCObjectType.
5391 if (const auto *objT
= dyn_cast
<ObjCObjectType
>(type
.getTypePtr())){
5392 // FIXME: Check for protocols to which the class type is already
5393 // known to conform.
5395 return getObjCObjectType(objT
->getBaseType(),
5396 objT
->getTypeArgsAsWritten(),
5398 objT
->isKindOfTypeAsWritten());
5401 // If the canonical type is ObjCObjectType, ...
5402 if (type
->isObjCObjectType()) {
5403 // Silently overwrite any existing protocol qualifiers.
5404 // TODO: determine whether that's the right thing to do.
5406 // FIXME: Check for protocols to which the class type is already
5407 // known to conform.
5408 return getObjCObjectType(type
, {}, protocols
, false);
5411 // id<protocol-list>
5412 if (type
->isObjCIdType()) {
5413 const auto *objPtr
= type
->castAs
<ObjCObjectPointerType
>();
5414 type
= getObjCObjectType(ObjCBuiltinIdTy
, {}, protocols
,
5415 objPtr
->isKindOfType());
5416 return getObjCObjectPointerType(type
);
5419 // Class<protocol-list>
5420 if (type
->isObjCClassType()) {
5421 const auto *objPtr
= type
->castAs
<ObjCObjectPointerType
>();
5422 type
= getObjCObjectType(ObjCBuiltinClassTy
, {}, protocols
,
5423 objPtr
->isKindOfType());
5424 return getObjCObjectPointerType(type
);
5432 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl
*Decl
,
5433 ArrayRef
<ObjCProtocolDecl
*> protocols
) const {
5434 // Look in the folding set for an existing type.
5435 llvm::FoldingSetNodeID ID
;
5436 ObjCTypeParamType::Profile(ID
, Decl
, Decl
->getUnderlyingType(), protocols
);
5437 void *InsertPos
= nullptr;
5438 if (ObjCTypeParamType
*TypeParam
=
5439 ObjCTypeParamTypes
.FindNodeOrInsertPos(ID
, InsertPos
))
5440 return QualType(TypeParam
, 0);
5442 // We canonicalize to the underlying type.
5443 QualType Canonical
= getCanonicalType(Decl
->getUnderlyingType());
5444 if (!protocols
.empty()) {
5445 // Apply the protocol qualifers.
5447 Canonical
= getCanonicalType(applyObjCProtocolQualifiers(
5448 Canonical
, protocols
, hasError
, true /*allowOnPointerType*/));
5449 assert(!hasError
&& "Error when apply protocol qualifier to bound type");
5452 unsigned size
= sizeof(ObjCTypeParamType
);
5453 size
+= protocols
.size() * sizeof(ObjCProtocolDecl
*);
5454 void *mem
= Allocate(size
, alignof(ObjCTypeParamType
));
5455 auto *newType
= new (mem
) ObjCTypeParamType(Decl
, Canonical
, protocols
);
5457 Types
.push_back(newType
);
5458 ObjCTypeParamTypes
.InsertNode(newType
, InsertPos
);
5459 return QualType(newType
, 0);
5462 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl
*Orig
,
5463 ObjCTypeParamDecl
*New
) const {
5464 New
->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig
->getUnderlyingType()));
5465 // Update TypeForDecl after updating TypeSourceInfo.
5466 auto NewTypeParamTy
= cast
<ObjCTypeParamType
>(New
->getTypeForDecl());
5467 SmallVector
<ObjCProtocolDecl
*, 8> protocols
;
5468 protocols
.append(NewTypeParamTy
->qual_begin(), NewTypeParamTy
->qual_end());
5469 QualType UpdatedTy
= getObjCTypeParamType(New
, protocols
);
5470 New
->setTypeForDecl(UpdatedTy
.getTypePtr());
5473 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
5474 /// protocol list adopt all protocols in QT's qualified-id protocol
5476 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT
,
5477 ObjCInterfaceDecl
*IC
) {
5478 if (!QT
->isObjCQualifiedIdType())
5481 if (const auto *OPT
= QT
->getAs
<ObjCObjectPointerType
>()) {
5482 // If both the right and left sides have qualifiers.
5483 for (auto *Proto
: OPT
->quals()) {
5484 if (!IC
->ClassImplementsProtocol(Proto
, false))
5492 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
5493 /// QT's qualified-id protocol list adopt all protocols in IDecl's list
5495 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT
,
5496 ObjCInterfaceDecl
*IDecl
) {
5497 if (!QT
->isObjCQualifiedIdType())
5499 const auto *OPT
= QT
->getAs
<ObjCObjectPointerType
>();
5502 if (!IDecl
->hasDefinition())
5504 llvm::SmallPtrSet
<ObjCProtocolDecl
*, 8> InheritedProtocols
;
5505 CollectInheritedProtocols(IDecl
, InheritedProtocols
);
5506 if (InheritedProtocols
.empty())
5508 // Check that if every protocol in list of id<plist> conforms to a protocol
5509 // of IDecl's, then bridge casting is ok.
5510 bool Conforms
= false;
5511 for (auto *Proto
: OPT
->quals()) {
5513 for (auto *PI
: InheritedProtocols
) {
5514 if (ProtocolCompatibleWithProtocol(Proto
, PI
)) {
5525 for (auto *PI
: InheritedProtocols
) {
5526 // If both the right and left sides have qualifiers.
5527 bool Adopts
= false;
5528 for (auto *Proto
: OPT
->quals()) {
5529 // return 'true' if 'PI' is in the inheritance hierarchy of Proto
5530 if ((Adopts
= ProtocolCompatibleWithProtocol(PI
, Proto
)))
/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, ObjectT);

  void *InsertPos = nullptr;
  if (ObjCObjectPointerType *QT =
        ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Find the canonical object type.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));

    // Regenerate InsertPos.
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // No match.
  void *Mem =
      Allocate(sizeof(ObjCObjectPointerType), alignof(ObjCObjectPointerType));
  auto *QType =
      new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(QType);
  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
  return QualType(QType, 0);
}
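// Illustrative sketch, not part of the original source: object-pointer types
// are uniqued on their pointee object type, so building the pointer twice
// yields the same node. Assumes a hypothetical ASTContext &Ctx and an
// ObjCInterfaceDecl *ID:
//
//   QualType Obj = Ctx.getObjCInterfaceType(ID);
//   QualType Ptr = Ctx.getObjCObjectPointerType(Obj);
//   assert(Ptr == Ctx.getObjCObjectPointerType(Obj));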
/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  if (PrevDecl) {
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;

  void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType));
  auto *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(T);
  return QualType(T, 0);
}
5594 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
5595 /// TypeOfExprType AST's (since expression's are never shared). For example,
5596 /// multiple declarations that refer to "typeof(x)" all contain different
5597 /// DeclRefExpr's. This doesn't effect the type checker, since it operates
5598 /// on canonical type's (which are always unique).
5599 QualType
ASTContext::getTypeOfExprType(Expr
*tofExpr
, TypeOfKind Kind
) const {
5600 TypeOfExprType
*toe
;
5601 if (tofExpr
->isTypeDependent()) {
5602 llvm::FoldingSetNodeID ID
;
5603 DependentTypeOfExprType::Profile(ID
, *this, tofExpr
,
5604 Kind
== TypeOfKind::Unqualified
);
5606 void *InsertPos
= nullptr;
5607 DependentTypeOfExprType
*Canon
=
5608 DependentTypeOfExprTypes
.FindNodeOrInsertPos(ID
, InsertPos
);
5610 // We already have a "canonical" version of an identical, dependent
5611 // typeof(expr) type. Use that as our canonical type.
5612 toe
= new (*this, alignof(TypeOfExprType
))
5613 TypeOfExprType(tofExpr
, Kind
, QualType((TypeOfExprType
*)Canon
, 0));
5615 // Build a new, canonical typeof(expr) type.
5616 Canon
= new (*this, alignof(DependentTypeOfExprType
))
5617 DependentTypeOfExprType(tofExpr
, Kind
);
5618 DependentTypeOfExprTypes
.InsertNode(Canon
, InsertPos
);
5622 QualType Canonical
= getCanonicalType(tofExpr
->getType());
5623 toe
= new (*this, alignof(TypeOfExprType
))
5624 TypeOfExprType(tofExpr
, Kind
, Canonical
);
5626 Types
.push_back(toe
);
5627 return QualType(toe
, 0);
/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
/// TypeOfType nodes. The only motivation to unique these nodes would be
/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
  QualType Canonical = getCanonicalType(tofType);
  auto *tot =
      new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind);
  Types.push_back(tot);
  return QualType(tot, 0);
}
5643 /// getReferenceQualifiedType - Given an expr, will return the type for
5644 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
5645 /// and class member access into account.
5646 QualType
ASTContext::getReferenceQualifiedType(const Expr
*E
) const {
5647 // C++11 [dcl.type.simple]p4:
5649 QualType T
= E
->getType();
5650 switch (E
->getValueKind()) {
5651 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
5654 return getRValueReferenceType(T
);
5655 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
5658 return getLValueReferenceType(T
);
5659 // - otherwise, decltype(e) is the type of e.
5663 llvm_unreachable("Unknown value kind");
5666 /// Unlike many "get<Type>" functions, we don't unique DecltypeType
5667 /// nodes. This would never be helpful, since each such type has its own
5668 /// expression, and would not give a significant memory saving, since there
5669 /// is an Expr tree under each such type.
5670 QualType
ASTContext::getDecltypeType(Expr
*e
, QualType UnderlyingType
) const {
5673 // C++11 [temp.type]p2:
5674 // If an expression e involves a template parameter, decltype(e) denotes a
5675 // unique dependent type. Two such decltype-specifiers refer to the same
5676 // type only if their expressions are equivalent (14.5.6.1).
5677 if (e
->isInstantiationDependent()) {
5678 llvm::FoldingSetNodeID ID
;
5679 DependentDecltypeType::Profile(ID
, *this, e
);
5681 void *InsertPos
= nullptr;
5682 DependentDecltypeType
*Canon
5683 = DependentDecltypeTypes
.FindNodeOrInsertPos(ID
, InsertPos
);
5685 // Build a new, canonical decltype(expr) type.
5686 Canon
= new (*this, alignof(DependentDecltypeType
))
5687 DependentDecltypeType(e
, DependentTy
);
5688 DependentDecltypeTypes
.InsertNode(Canon
, InsertPos
);
5690 dt
= new (*this, alignof(DecltypeType
))
5691 DecltypeType(e
, UnderlyingType
, QualType((DecltypeType
*)Canon
, 0));
5693 dt
= new (*this, alignof(DecltypeType
))
5694 DecltypeType(e
, UnderlyingType
, getCanonicalType(UnderlyingType
));
5696 Types
.push_back(dt
);
5697 return QualType(dt
, 0);
5700 /// getUnaryTransformationType - We don't unique these, since the memory
5701 /// savings are minimal and these are rare.
5702 QualType
ASTContext::getUnaryTransformType(QualType BaseType
,
5703 QualType UnderlyingType
,
5704 UnaryTransformType::UTTKind Kind
)
5706 UnaryTransformType
*ut
= nullptr;
5708 if (BaseType
->isDependentType()) {
5709 // Look in the folding set for an existing type.
5710 llvm::FoldingSetNodeID ID
;
5711 DependentUnaryTransformType::Profile(ID
, getCanonicalType(BaseType
), Kind
);
5713 void *InsertPos
= nullptr;
5714 DependentUnaryTransformType
*Canon
5715 = DependentUnaryTransformTypes
.FindNodeOrInsertPos(ID
, InsertPos
);
5718 // Build a new, canonical __underlying_type(type) type.
5719 Canon
= new (*this, alignof(DependentUnaryTransformType
))
5720 DependentUnaryTransformType(*this, getCanonicalType(BaseType
), Kind
);
5721 DependentUnaryTransformTypes
.InsertNode(Canon
, InsertPos
);
5723 ut
= new (*this, alignof(UnaryTransformType
))
5724 UnaryTransformType(BaseType
, QualType(), Kind
, QualType(Canon
, 0));
5726 QualType CanonType
= getCanonicalType(UnderlyingType
);
5727 ut
= new (*this, alignof(UnaryTransformType
))
5728 UnaryTransformType(BaseType
, UnderlyingType
, Kind
, CanonType
);
5730 Types
.push_back(ut
);
5731 return QualType(ut
, 0);
5734 QualType
ASTContext::getAutoTypeInternal(
5735 QualType DeducedType
, AutoTypeKeyword Keyword
, bool IsDependent
,
5736 bool IsPack
, ConceptDecl
*TypeConstraintConcept
,
5737 ArrayRef
<TemplateArgument
> TypeConstraintArgs
, bool IsCanon
) const {
5738 if (DeducedType
.isNull() && Keyword
== AutoTypeKeyword::Auto
&&
5739 !TypeConstraintConcept
&& !IsDependent
)
5740 return getAutoDeductType();
5742 // Look in the folding set for an existing type.
5743 void *InsertPos
= nullptr;
5744 llvm::FoldingSetNodeID ID
;
5745 AutoType::Profile(ID
, *this, DeducedType
, Keyword
, IsDependent
,
5746 TypeConstraintConcept
, TypeConstraintArgs
);
5747 if (AutoType
*AT
= AutoTypes
.FindNodeOrInsertPos(ID
, InsertPos
))
5748 return QualType(AT
, 0);
5752 if (!DeducedType
.isNull()) {
5753 Canon
= DeducedType
.getCanonicalType();
5754 } else if (TypeConstraintConcept
) {
5755 bool AnyNonCanonArgs
= false;
5756 ConceptDecl
*CanonicalConcept
= TypeConstraintConcept
->getCanonicalDecl();
5757 auto CanonicalConceptArgs
= ::getCanonicalTemplateArguments(
5758 *this, TypeConstraintArgs
, AnyNonCanonArgs
);
5759 if (CanonicalConcept
!= TypeConstraintConcept
|| AnyNonCanonArgs
) {
5761 getAutoTypeInternal(QualType(), Keyword
, IsDependent
, IsPack
,
5762 CanonicalConcept
, CanonicalConceptArgs
, true);
5763 // Find the insert position again.
5764 [[maybe_unused
]] auto *Nothing
=
5765 AutoTypes
.FindNodeOrInsertPos(ID
, InsertPos
);
5766 assert(!Nothing
&& "canonical type broken");
5771 void *Mem
= Allocate(sizeof(AutoType
) +
5772 sizeof(TemplateArgument
) * TypeConstraintArgs
.size(),
5774 auto *AT
= new (Mem
) AutoType(
5775 DeducedType
, Keyword
,
5776 (IsDependent
? TypeDependence::DependentInstantiation
5777 : TypeDependence::None
) |
5778 (IsPack
? TypeDependence::UnexpandedPack
: TypeDependence::None
),
5779 Canon
, TypeConstraintConcept
, TypeConstraintArgs
);
5780 Types
.push_back(AT
);
5781 AutoTypes
.InsertNode(AT
, InsertPos
);
5782 return QualType(AT
, 0);
5785 /// getAutoType - Return the uniqued reference to the 'auto' type which has been
5786 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the
5787 /// canonical deduced-but-dependent 'auto' type.
5789 ASTContext::getAutoType(QualType DeducedType
, AutoTypeKeyword Keyword
,
5790 bool IsDependent
, bool IsPack
,
5791 ConceptDecl
*TypeConstraintConcept
,
5792 ArrayRef
<TemplateArgument
> TypeConstraintArgs
) const {
5793 assert((!IsPack
|| IsDependent
) && "only use IsPack for a dependent pack");
5794 assert((!IsDependent
|| DeducedType
.isNull()) &&
5795 "A dependent auto should be undeduced");
5796 return getAutoTypeInternal(DeducedType
, Keyword
, IsDependent
, IsPack
,
5797 TypeConstraintConcept
, TypeConstraintArgs
);
5800 QualType
ASTContext::getUnconstrainedType(QualType T
) const {
5801 QualType CanonT
= T
.getCanonicalType();
5803 // Remove a type-constraint from a top-level auto or decltype(auto).
5804 if (auto *AT
= CanonT
->getAs
<AutoType
>()) {
5805 if (!AT
->isConstrained())
5807 return getQualifiedType(getAutoType(QualType(), AT
->getKeyword(), false,
5808 AT
->containsUnexpandedParameterPack()),
5812 // FIXME: We only support constrained auto at the top level in the type of a
5813 // non-type template parameter at the moment. Once we lift that restriction,
5814 // we'll need to recursively build types containing auto here.
5815 assert(!CanonT
->getContainedAutoType() ||
5816 !CanonT
->getContainedAutoType()->isConstrained());
5820 /// Return the uniqued reference to the deduced template specialization type
5821 /// which has been deduced to the given type, or to the canonical undeduced
5822 /// such type, or the canonical deduced-but-dependent such type.
5823 QualType
ASTContext::getDeducedTemplateSpecializationType(
5824 TemplateName Template
, QualType DeducedType
, bool IsDependent
) const {
5825 // Look in the folding set for an existing type.
5826 void *InsertPos
= nullptr;
5827 llvm::FoldingSetNodeID ID
;
5828 DeducedTemplateSpecializationType::Profile(ID
, Template
, DeducedType
,
5830 if (DeducedTemplateSpecializationType
*DTST
=
5831 DeducedTemplateSpecializationTypes
.FindNodeOrInsertPos(ID
, InsertPos
))
5832 return QualType(DTST
, 0);
5834 auto *DTST
= new (*this, alignof(DeducedTemplateSpecializationType
))
5835 DeducedTemplateSpecializationType(Template
, DeducedType
, IsDependent
);
5836 llvm::FoldingSetNodeID TempID
;
5837 DTST
->Profile(TempID
);
5838 assert(ID
== TempID
&& "ID does not match");
5839 Types
.push_back(DTST
);
5840 DeducedTemplateSpecializationTypes
.InsertNode(DTST
, InsertPos
);
5841 return QualType(DTST
, 0);
/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  AtomicType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // If the atomic value type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical = T;
  if (!T.isCanonical()) {
    Canonical = getAtomicType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
  Types.push_back(New);
  AtomicTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
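// Illustrative sketch, not part of the original source: _Atomic(T) is uniqued
// per value type and canonicalizes alongside it. Assumes a hypothetical
// ASTContext &Ctx:
//
//   QualType AtomicInt = Ctx.getAtomicType(Ctx.IntTy);
//   assert(AtomicInt->isAtomicType());
//   assert(AtomicInt == Ctx.getAtomicType(Ctx.IntTy));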
/// getAutoDeductType - Get type pattern for deducing against 'auto'.
QualType ASTContext::getAutoDeductType() const {
  if (AutoDeductTy.isNull())
    AutoDeductTy = QualType(new (*this, alignof(AutoType))
                                AutoType(QualType(), AutoTypeKeyword::Auto,
                                         TypeDependence::None, QualType(),
                                         /*concept*/ nullptr, /*args*/ {}),
                            0);
  return AutoDeductTy;
}

/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
QualType ASTContext::getAutoRRefDeductType() const {
  if (AutoRRefDeductTy.isNull())
    AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
  return AutoRRefDeductTy;
}

/// getTagDeclType - Return the unique reference to the type for the
/// specified TagDecl (struct/union/class/enum) decl.
QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
  assert(Decl);
  // FIXME: What is the design on getTagDeclType when it requires casting
  // away const? mutable?
  return getTypeDeclType(const_cast<TagDecl *>(Decl));
}
5900 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
5901 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
5902 /// needs to agree with the definition in <stddef.h>.
5903 CanQualType
ASTContext::getSizeType() const {
5904 return getFromTargetType(Target
->getSizeType());
5907 /// Return the unique signed counterpart of the integer type
5908 /// corresponding to size_t.
5909 CanQualType
ASTContext::getSignedSizeType() const {
5910 return getFromTargetType(Target
->getSignedSizeType());
5913 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
5914 CanQualType
ASTContext::getIntMaxType() const {
5915 return getFromTargetType(Target
->getIntMaxType());
5918 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
5919 CanQualType
ASTContext::getUIntMaxType() const {
5920 return getFromTargetType(Target
->getUIntMaxType());
5923 /// getSignedWCharType - Return the type of "signed wchar_t".
5924 /// Used when in C++, as a GCC extension.
5925 QualType
ASTContext::getSignedWCharType() const {
5926 // FIXME: derive from "Target" ?
5930 /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
5931 /// Used when in C++, as a GCC extension.
5932 QualType
ASTContext::getUnsignedWCharType() const {
5933 // FIXME: derive from "Target" ?
5934 return UnsignedIntTy
;
5937 QualType
ASTContext::getIntPtrType() const {
5938 return getFromTargetType(Target
->getIntPtrType());
5941 QualType
ASTContext::getUIntPtrType() const {
5942 return getCorrespondingUnsignedType(getIntPtrType());
5945 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
5946 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
5947 QualType
ASTContext::getPointerDiffType() const {
5948 return getFromTargetType(Target
->getPtrDiffType(LangAS::Default
));
5951 /// Return the unique unsigned counterpart of "ptrdiff_t"
5952 /// integer type. The standard (C11 7.21.6.1p7) refers to this type
5953 /// in the definition of %tu format specifier.
5954 QualType
ASTContext::getUnsignedPointerDiffType() const {
5955 return getFromTargetType(Target
->getUnsignedPtrDiffType(LangAS::Default
));
5958 /// Return the unique type for "pid_t" defined in
5959 /// <sys/types.h>. We need this to compute the correct type for vfork().
5960 QualType
ASTContext::getProcessIDType() const {
5961 return getFromTargetType(Target
->getProcessIDType());
5964 //===----------------------------------------------------------------------===//
5966 //===----------------------------------------------------------------------===//
5968 CanQualType
ASTContext::getCanonicalParamType(QualType T
) const {
5969 // Push qualifiers into arrays, and then discard any remaining
5971 T
= getCanonicalType(T
);
5972 T
= getVariableArrayDecayedType(T
);
5973 const Type
*Ty
= T
.getTypePtr();
5975 if (isa
<ArrayType
>(Ty
)) {
5976 Result
= getArrayDecayedType(QualType(Ty
,0));
5977 } else if (isa
<FunctionType
>(Ty
)) {
5978 Result
= getPointerType(QualType(Ty
, 0));
5980 Result
= QualType(Ty
, 0);
5983 return CanQualType::CreateUnsafe(Result
);
5986 QualType
ASTContext::getUnqualifiedArrayType(QualType type
,
5987 Qualifiers
&quals
) {
5988 SplitQualType splitType
= type
.getSplitUnqualifiedType();
5990 // FIXME: getSplitUnqualifiedType() actually walks all the way to
5991 // the unqualified desugared type and then drops it on the floor.
5992 // We then have to strip that sugar back off with
5993 // getUnqualifiedDesugaredType(), which is silly.
5995 dyn_cast
<ArrayType
>(splitType
.Ty
->getUnqualifiedDesugaredType());
5997 // If we don't have an array, just use the results in splitType.
5999 quals
= splitType
.Quals
;
6000 return QualType(splitType
.Ty
, 0);
6003 // Otherwise, recurse on the array's element type.
6004 QualType elementType
= AT
->getElementType();
6005 QualType unqualElementType
= getUnqualifiedArrayType(elementType
, quals
);
6007 // If that didn't change the element type, AT has no qualifiers, so we
6008 // can just use the results in splitType.
6009 if (elementType
== unqualElementType
) {
6010 assert(quals
.empty()); // from the recursive call
6011 quals
= splitType
.Quals
;
6012 return QualType(splitType
.Ty
, 0);
6015 // Otherwise, add in the qualifiers from the outermost type, then
6016 // build the type back up.
6017 quals
.addConsistentQualifiers(splitType
.Quals
);
6019 if (const auto *CAT
= dyn_cast
<ConstantArrayType
>(AT
)) {
6020 return getConstantArrayType(unqualElementType
, CAT
->getSize(),
6021 CAT
->getSizeExpr(), CAT
->getSizeModifier(), 0);
6024 if (const auto *IAT
= dyn_cast
<IncompleteArrayType
>(AT
)) {
6025 return getIncompleteArrayType(unqualElementType
, IAT
->getSizeModifier(), 0);
6028 if (const auto *VAT
= dyn_cast
<VariableArrayType
>(AT
)) {
6029 return getVariableArrayType(unqualElementType
,
6031 VAT
->getSizeModifier(),
6032 VAT
->getIndexTypeCVRQualifiers(),
6033 VAT
->getBracketsRange());
6036 const auto *DSAT
= cast
<DependentSizedArrayType
>(AT
);
6037 return getDependentSizedArrayType(unqualElementType
, DSAT
->getSizeExpr(),
6038 DSAT
->getSizeModifier(), 0,
6042 /// Attempt to unwrap two types that may both be array types with the same bound
6043 /// (or both be array types of unknown bound) for the purpose of comparing the
6044 /// cv-decomposition of two types per C++ [conv.qual].
6046 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
6047 /// C++20 [conv.qual], if permitted by the current language mode.
6048 void ASTContext::UnwrapSimilarArrayTypes(QualType
&T1
, QualType
&T2
,
6049 bool AllowPiMismatch
) {
6051 auto *AT1
= getAsArrayType(T1
);
6055 auto *AT2
= getAsArrayType(T2
);
6059 // If we don't have two array types with the same constant bound nor two
6060 // incomplete array types, we've unwrapped everything we can.
6061 // C++20 also permits one type to be a constant array type and the other
6062 // to be an incomplete array type.
6063 // FIXME: Consider also unwrapping array of unknown bound and VLA.
6064 if (auto *CAT1
= dyn_cast
<ConstantArrayType
>(AT1
)) {
6065 auto *CAT2
= dyn_cast
<ConstantArrayType
>(AT2
);
6066 if (!((CAT2
&& CAT1
->getSize() == CAT2
->getSize()) ||
6067 (AllowPiMismatch
&& getLangOpts().CPlusPlus20
&&
6068 isa
<IncompleteArrayType
>(AT2
))))
6070 } else if (isa
<IncompleteArrayType
>(AT1
)) {
6071 if (!(isa
<IncompleteArrayType
>(AT2
) ||
6072 (AllowPiMismatch
&& getLangOpts().CPlusPlus20
&&
6073 isa
<ConstantArrayType
>(AT2
))))
6079 T1
= AT1
->getElementType();
6080 T2
= AT2
->getElementType();
6084 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
6086 /// If T1 and T2 are both pointer types of the same kind, or both array types
6087 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is
6088 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
6090 /// This function will typically be called in a loop that successively
6091 /// "unwraps" pointer and pointer-to-member types to compare them at each
6094 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
6095 /// C++20 [conv.qual], if permitted by the current language mode.
6097 /// \return \c true if a pointer type was unwrapped, \c false if we reached a
6098 /// pair of types that can't be unwrapped further.
6099 bool ASTContext::UnwrapSimilarTypes(QualType
&T1
, QualType
&T2
,
6100 bool AllowPiMismatch
) {
6101 UnwrapSimilarArrayTypes(T1
, T2
, AllowPiMismatch
);
6103 const auto *T1PtrType
= T1
->getAs
<PointerType
>();
6104 const auto *T2PtrType
= T2
->getAs
<PointerType
>();
6105 if (T1PtrType
&& T2PtrType
) {
6106 T1
= T1PtrType
->getPointeeType();
6107 T2
= T2PtrType
->getPointeeType();
6111 const auto *T1MPType
= T1
->getAs
<MemberPointerType
>();
6112 const auto *T2MPType
= T2
->getAs
<MemberPointerType
>();
6113 if (T1MPType
&& T2MPType
&&
6114 hasSameUnqualifiedType(QualType(T1MPType
->getClass(), 0),
6115 QualType(T2MPType
->getClass(), 0))) {
6116 T1
= T1MPType
->getPointeeType();
6117 T2
= T2MPType
->getPointeeType();
6121 if (getLangOpts().ObjC
) {
6122 const auto *T1OPType
= T1
->getAs
<ObjCObjectPointerType
>();
6123 const auto *T2OPType
= T2
->getAs
<ObjCObjectPointerType
>();
6124 if (T1OPType
&& T2OPType
) {
6125 T1
= T1OPType
->getPointeeType();
6126 T2
= T2OPType
->getPointeeType();
6131 // FIXME: Block pointers, too?
bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
  while (true) {
    Qualifiers Quals;
    T1 = getUnqualifiedArrayType(T1, Quals);
    T2 = getUnqualifiedArrayType(T2, Quals);
    if (hasSameType(T1, T2))
      return true;
    if (!UnwrapSimilarTypes(T1, T2))
      return false;
  }
}

bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
  while (true) {
    Qualifiers Quals1, Quals2;
    T1 = getUnqualifiedArrayType(T1, Quals1);
    T2 = getUnqualifiedArrayType(T2, Quals2);

    Quals1.removeCVRQualifiers();
    Quals2.removeCVRQualifiers();
    if (Quals1 != Quals2)
      return false;

    if (hasSameType(T1, T2))
      return true;

    if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
      return false;
  }
}
6168 ASTContext::getNameForTemplate(TemplateName Name
,
6169 SourceLocation NameLoc
) const {
6170 switch (Name
.getKind()) {
6171 case TemplateName::QualifiedTemplate
:
6172 case TemplateName::Template
:
6173 // DNInfo work in progress: CHECKME: what about DNLoc?
6174 return DeclarationNameInfo(Name
.getAsTemplateDecl()->getDeclName(),
6177 case TemplateName::OverloadedTemplate
: {
6178 OverloadedTemplateStorage
*Storage
= Name
.getAsOverloadedTemplate();
6179 // DNInfo work in progress: CHECKME: what about DNLoc?
6180 return DeclarationNameInfo((*Storage
->begin())->getDeclName(), NameLoc
);
6183 case TemplateName::AssumedTemplate
: {
6184 AssumedTemplateStorage
*Storage
= Name
.getAsAssumedTemplateName();
6185 return DeclarationNameInfo(Storage
->getDeclName(), NameLoc
);
6188 case TemplateName::DependentTemplate
: {
6189 DependentTemplateName
*DTN
= Name
.getAsDependentTemplateName();
6190 DeclarationName DName
;
6191 if (DTN
->isIdentifier()) {
6192 DName
= DeclarationNames
.getIdentifier(DTN
->getIdentifier());
6193 return DeclarationNameInfo(DName
, NameLoc
);
6195 DName
= DeclarationNames
.getCXXOperatorName(DTN
->getOperator());
6196 // DNInfo work in progress: FIXME: source locations?
6197 DeclarationNameLoc DNLoc
=
6198 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange());
6199 return DeclarationNameInfo(DName
, NameLoc
, DNLoc
);
6203 case TemplateName::SubstTemplateTemplateParm
: {
6204 SubstTemplateTemplateParmStorage
*subst
6205 = Name
.getAsSubstTemplateTemplateParm();
6206 return DeclarationNameInfo(subst
->getParameter()->getDeclName(),
6210 case TemplateName::SubstTemplateTemplateParmPack
: {
6211 SubstTemplateTemplateParmPackStorage
*subst
6212 = Name
.getAsSubstTemplateTemplateParmPack();
6213 return DeclarationNameInfo(subst
->getParameterPack()->getDeclName(),
6216 case TemplateName::UsingTemplate
:
6217 return DeclarationNameInfo(Name
.getAsUsingShadowDecl()->getDeclName(),
6221 llvm_unreachable("bad template name kind!");
6225 ASTContext::getCanonicalTemplateName(const TemplateName
&Name
) const {
6226 switch (Name
.getKind()) {
6227 case TemplateName::UsingTemplate
:
6228 case TemplateName::QualifiedTemplate
:
6229 case TemplateName::Template
: {
6230 TemplateDecl
*Template
= Name
.getAsTemplateDecl();
6231 if (auto *TTP
= dyn_cast
<TemplateTemplateParmDecl
>(Template
))
6232 Template
= getCanonicalTemplateTemplateParmDecl(TTP
);
6234 // The canonical template name is the canonical template declaration.
6235 return TemplateName(cast
<TemplateDecl
>(Template
->getCanonicalDecl()));
6238 case TemplateName::OverloadedTemplate
:
6239 case TemplateName::AssumedTemplate
:
6240 llvm_unreachable("cannot canonicalize unresolved template");
6242 case TemplateName::DependentTemplate
: {
6243 DependentTemplateName
*DTN
= Name
.getAsDependentTemplateName();
6244 assert(DTN
&& "Non-dependent template names must refer to template decls.");
6245 return DTN
->CanonicalTemplateName
;
6248 case TemplateName::SubstTemplateTemplateParm
: {
6249 SubstTemplateTemplateParmStorage
*subst
6250 = Name
.getAsSubstTemplateTemplateParm();
6251 return getCanonicalTemplateName(subst
->getReplacement());
6254 case TemplateName::SubstTemplateTemplateParmPack
: {
6255 SubstTemplateTemplateParmPackStorage
*subst
=
6256 Name
.getAsSubstTemplateTemplateParmPack();
6257 TemplateArgument canonArgPack
=
6258 getCanonicalTemplateArgument(subst
->getArgumentPack());
6259 return getSubstTemplateTemplateParmPack(
6260 canonArgPack
, subst
->getAssociatedDecl()->getCanonicalDecl(),
6261 subst
->getFinal(), subst
->getIndex());
6265 llvm_unreachable("bad template name!");
6268 bool ASTContext::hasSameTemplateName(const TemplateName
&X
,
6269 const TemplateName
&Y
) const {
6270 return getCanonicalTemplateName(X
).getAsVoidPointer() ==
6271 getCanonicalTemplateName(Y
).getAsVoidPointer();
6274 bool ASTContext::isSameConstraintExpr(const Expr
*XCE
, const Expr
*YCE
) const {
6281 llvm::FoldingSetNodeID XCEID
, YCEID
;
6282 XCE
->Profile(XCEID
, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
6283 YCE
->Profile(YCEID
, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
6284 return XCEID
== YCEID
;
6287 bool ASTContext::isSameTypeConstraint(const TypeConstraint
*XTC
,
6288 const TypeConstraint
*YTC
) const {
6295 auto *NCX
= XTC
->getNamedConcept();
6296 auto *NCY
= YTC
->getNamedConcept();
6297 if (!NCX
|| !NCY
|| !isSameEntity(NCX
, NCY
))
6299 if (XTC
->getConceptReference()->hasExplicitTemplateArgs() !=
6300 YTC
->getConceptReference()->hasExplicitTemplateArgs())
6302 if (XTC
->getConceptReference()->hasExplicitTemplateArgs())
6303 if (XTC
->getConceptReference()
6304 ->getTemplateArgsAsWritten()
6305 ->NumTemplateArgs
!=
6306 YTC
->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs
)
6309 // Compare slowly by profiling.
6311 // We couldn't compare the profiling result for the template
6312 // args here. Consider the following example in different modules:
6314 // template <__integer_like _Tp, C<_Tp> Sentinel>
6315 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
6319 // When we compare the profiling result for `C<_Tp>` in different
6320 // modules, it will compare the type of `_Tp` in different modules.
6321 // However, the type of `_Tp` in different modules refer to different
6322 // types here naturally. So we couldn't compare the profiling result
6323 // for the template args directly.
6324 return isSameConstraintExpr(XTC
->getImmediatelyDeclaredConstraint(),
6325 YTC
->getImmediatelyDeclaredConstraint());
6328 bool ASTContext::isSameTemplateParameter(const NamedDecl
*X
,
6329 const NamedDecl
*Y
) const {
6330 if (X
->getKind() != Y
->getKind())
6333 if (auto *TX
= dyn_cast
<TemplateTypeParmDecl
>(X
)) {
6334 auto *TY
= cast
<TemplateTypeParmDecl
>(Y
);
6335 if (TX
->isParameterPack() != TY
->isParameterPack())
6337 if (TX
->hasTypeConstraint() != TY
->hasTypeConstraint())
6339 return isSameTypeConstraint(TX
->getTypeConstraint(),
6340 TY
->getTypeConstraint());
6343 if (auto *TX
= dyn_cast
<NonTypeTemplateParmDecl
>(X
)) {
6344 auto *TY
= cast
<NonTypeTemplateParmDecl
>(Y
);
6345 return TX
->isParameterPack() == TY
->isParameterPack() &&
6346 TX
->getASTContext().hasSameType(TX
->getType(), TY
->getType()) &&
6347 isSameConstraintExpr(TX
->getPlaceholderTypeConstraint(),
6348 TY
->getPlaceholderTypeConstraint());
6351 auto *TX
= cast
<TemplateTemplateParmDecl
>(X
);
6352 auto *TY
= cast
<TemplateTemplateParmDecl
>(Y
);
6353 return TX
->isParameterPack() == TY
->isParameterPack() &&
6354 isSameTemplateParameterList(TX
->getTemplateParameters(),
6355 TY
->getTemplateParameters());
6358 bool ASTContext::isSameTemplateParameterList(
6359 const TemplateParameterList
*X
, const TemplateParameterList
*Y
) const {
6360 if (X
->size() != Y
->size())
6363 for (unsigned I
= 0, N
= X
->size(); I
!= N
; ++I
)
6364 if (!isSameTemplateParameter(X
->getParam(I
), Y
->getParam(I
)))
6367 return isSameConstraintExpr(X
->getRequiresClause(), Y
->getRequiresClause());
6370 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl
*X
,
6371 const NamedDecl
*Y
) const {
6372 // If the type parameter isn't the same already, we don't need to check the
6373 // default argument further.
6374 if (!isSameTemplateParameter(X
, Y
))
6377 if (auto *TTPX
= dyn_cast
<TemplateTypeParmDecl
>(X
)) {
6378 auto *TTPY
= cast
<TemplateTypeParmDecl
>(Y
);
6379 if (!TTPX
->hasDefaultArgument() || !TTPY
->hasDefaultArgument())
6382 return hasSameType(TTPX
->getDefaultArgument(), TTPY
->getDefaultArgument());
6385 if (auto *NTTPX
= dyn_cast
<NonTypeTemplateParmDecl
>(X
)) {
6386 auto *NTTPY
= cast
<NonTypeTemplateParmDecl
>(Y
);
6387 if (!NTTPX
->hasDefaultArgument() || !NTTPY
->hasDefaultArgument())
6390 Expr
*DefaultArgumentX
= NTTPX
->getDefaultArgument()->IgnoreImpCasts();
6391 Expr
*DefaultArgumentY
= NTTPY
->getDefaultArgument()->IgnoreImpCasts();
6392 llvm::FoldingSetNodeID XID
, YID
;
6393 DefaultArgumentX
->Profile(XID
, *this, /*Canonical=*/true);
6394 DefaultArgumentY
->Profile(YID
, *this, /*Canonical=*/true);
6398 auto *TTPX
= cast
<TemplateTemplateParmDecl
>(X
);
6399 auto *TTPY
= cast
<TemplateTemplateParmDecl
>(Y
);
6401 if (!TTPX
->hasDefaultArgument() || !TTPY
->hasDefaultArgument())
6404 const TemplateArgument
&TAX
= TTPX
->getDefaultArgument().getArgument();
6405 const TemplateArgument
&TAY
= TTPY
->getDefaultArgument().getArgument();
6406 return hasSameTemplateName(TAX
.getAsTemplate(), TAY
.getAsTemplate());
6409 static NamespaceDecl
*getNamespace(const NestedNameSpecifier
*X
) {
6410 if (auto *NS
= X
->getAsNamespace())
6412 if (auto *NAS
= X
->getAsNamespaceAlias())
6413 return NAS
->getNamespace();
static bool isSameQualifier(const NestedNameSpecifier *X,
                            const NestedNameSpecifier *Y) {
  if (auto *NSX = getNamespace(X)) {
    auto *NSY = getNamespace(Y);
    if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
      return false;
  } else if (X->getKind() != Y->getKind())
    return false;

  // FIXME: For namespaces and types, we're permitted to check that the entity
  // is named via the same tokens. We should probably do so.
  switch (X->getKind()) {
  case NestedNameSpecifier::Identifier:
    if (X->getAsIdentifier() != Y->getAsIdentifier())
      return false;
    break;
  case NestedNameSpecifier::Namespace:
  case NestedNameSpecifier::NamespaceAlias:
    // We've already checked that we named the same namespace.
    break;
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate:
    if (X->getAsType()->getCanonicalTypeInternal() !=
        Y->getAsType()->getCanonicalTypeInternal())
      return false;
    break;
  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    return true;
  }

  // Recurse into earlier portion of NNS, if any.
  auto *PX = X->getPrefix();
  auto *PY = Y->getPrefix();
  if (PX && PY)
    return isSameQualifier(PX, PY);
  return !PX && !PY;
}

/// Determine whether the attributes we can overload on are identical for A and
/// B. Will ignore any overloadable attrs represented in the type of A and B.
static bool hasSameOverloadableAttrs(const FunctionDecl *A,
                                     const FunctionDecl *B) {
  // Note that pass_object_size attributes are represented in the function's
  // ExtParameterInfo, so we don't need to check them here.

  llvm::FoldingSetNodeID Cand1ID, Cand2ID;
  auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
  auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();

  for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
    std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
    std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);

    // Return false if the number of enable_if attributes is different.
    if (!Cand1A || !Cand2A)
      return false;

    Cand1ID.clear();
    Cand2ID.clear();

    (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
    (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);

    // Return false if any of the enable_if expressions of A and B are
    // different.
    if (Cand1ID != Cand2ID)
      return false;
  }
  return true;
}
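
// Illustrative example (editorial note, not from the upstream source): two
// redeclarations such as
//   void f(int n) __attribute__((enable_if(n > 0, "")));
//   void f(int n) __attribute__((enable_if(n > 1, "")));
// do not have the same overloadable attrs, because their enable_if conditions
// profile differently; matching requires the same number of enable_if
// attributes with identically-profiled conditions.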

bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  // Caution: this function is called by the AST reader during deserialization,
  // so it cannot rely on AST invariants being met. Non-trivial accessors
  // should be avoided, along with any traversal of redeclaration chains.

  if (X == Y)
    return true;

  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
                          cast<Decl>(Y->getDeclContext()->getRedeclContext())))
    return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type.
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
      return hasSameType(TypedefX->getUnderlyingType(),
                         TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match.
  if (const auto *TagX = dyn_cast<TagDecl>(X)) {
    const auto *TagY = cast<TagDecl>(Y);
    return (TagX->getTagKind() == TagY->getTagKind()) ||
           ((TagX->getTagKind() == TagTypeKind::Struct ||
             TagX->getTagKind() == TagTypeKind::Class ||
             TagX->getTagKind() == TagTypeKind::Interface) &&
            (TagY->getTagKind() == TagTypeKind::Struct ||
             TagY->getTagKind() == TagTypeKind::Class ||
             TagY->getTagKind() == TagTypeKind::Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
    const auto *FuncY = cast<FunctionDecl>(Y);
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Y);
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
                        CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
    // not the same entity if they are constrained.
    if ((FuncX->isMemberLikeConstrainedFriend() ||
         FuncY->isMemberLikeConstrainedFriend()) &&
        !FuncX->getLexicalDeclContext()->Equals(
            FuncY->getLexicalDeclContext())) {
      return false;
    }

    if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
                              FuncY->getTrailingRequiresClause()))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(XT, YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(FuncX, FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(X)) {
    const auto *VarY = cast<VarDecl>(Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      // During deserialization, we might compare variables before we load
      // their types. Assume the types will end up being the same.
      if (VarX->getType().isNull() || VarY->getType().isNull())
        return true;

      if (hasSameType(VarX->getType(), VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // This only happens when completing an incomplete array type. In this
      // case, when comparing #1 and #2, we should go through their element
      // type.
      const ArrayType *VarXTy = getAsArrayType(VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
    const auto *TemplateY = cast<TemplateDecl>(Y);

    // ConceptDecls wouldn't be the same if their constraint expressions differ.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) {
      const auto *ConceptY = cast<ConceptDecl>(Y);
      if (!isSameConstraintExpr(ConceptX->getConstraintExpr(),
                                ConceptY->getConstraintExpr()))
        return false;
    }

    return isSameEntity(TemplateX->getTemplatedDecl(),
                        TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(TemplateX->getTemplateParameters(),
                                       TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
    const auto *FDY = cast<FieldDecl>(Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(FDX->getType(), FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(X))
    // FIXME: Also check the value is odr-equivalent.
    return true;

  // Using shadow declarations with the same target match.
  if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
    const auto *USY = cast<UsingShadowDecl>(Y);
    return USX->getTargetDecl() == USY->getTargetDecl();
  }

  // Using declarations with the same qualifier match. (We already know that
  // the name matches.)
  if (const auto *UX = dyn_cast<UsingDecl>(X)) {
    const auto *UY = cast<UsingDecl>(Y);
    return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
           UX->hasTypename() == UY->hasTypename() &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
    const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
    return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
           UX->isAccessDeclaration() == UY->isAccessDeclaration();
  }
  if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
    return isSameQualifier(
        UX->getQualifier(),
        cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
  }

  // Using-pack declarations are only created by instantiation, and match if
  // they're instantiated from matching UnresolvedUsing...Decls.
  if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
    return declaresSameEntity(
        UX->getInstantiatedFromUsingDecl(),
        cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
  }

  // Namespace alias definitions with the same target match.
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Y);
    return NAX->getNamespace()->Equals(NAY->getNamespace());
  }

  return false;
}
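
// Illustrative example (editorial note, not from the upstream source): if two
// modules both declare
//   typedef unsigned long size_type;
// at the same scope, isSameEntity treats the two TypedefNameDecls as the same
// entity, because the names, redeclaration contexts, and underlying types all
// agree, so the AST reader can merge them.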

TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
    case TemplateArgument::Null:
      return Arg;

    case TemplateArgument::Expression:
      return Arg;

    case TemplateArgument::Declaration: {
      auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
      return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()),
                              Arg.getIsDefaulted());
    }

    case TemplateArgument::NullPtr:
      return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
                              /*isNullPtr*/ true, Arg.getIsDefaulted());

    case TemplateArgument::Template:
      return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()),
                              Arg.getIsDefaulted());

    case TemplateArgument::TemplateExpansion:
      return TemplateArgument(
          getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()),
          Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());

    case TemplateArgument::Integral:
      return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));

    case TemplateArgument::Type:
      return TemplateArgument(getCanonicalType(Arg.getAsType()),
                              /*isNullPtr*/ false, Arg.getIsDefaulted());

    case TemplateArgument::Pack: {
      bool AnyNonCanonArgs = false;
      auto CanonArgs = ::getCanonicalTemplateArguments(
          *this, Arg.pack_elements(), AnyNonCanonArgs);
      if (!AnyNonCanonArgs)
        return Arg;
      return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this),
                                              CanonArgs);
    }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}

NestedNameSpecifier *
ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
  if (!NNS)
    return nullptr;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Canonicalize the prefix but keep the identifier the same.
    return NestedNameSpecifier::Create(*this,
                       getCanonicalNestedNameSpecifier(NNS->getPrefix()),
                       NNS->getAsIdentifier());

  case NestedNameSpecifier::Namespace:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                       NNS->getAsNamespace()->getOriginalNamespace());

  case NestedNameSpecifier::NamespaceAlias:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                       NNS->getAsNamespaceAlias()->getNamespace()
                           ->getOriginalNamespace());

  // The difference between TypeSpec and TypeSpecWithTemplate is that the
  // latter will have the 'template' keyword when printed.
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const Type *T = getCanonicalType(NNS->getAsType());

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconstitute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const auto *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(
          *this, DNT->getQualifier(),
          const_cast<IdentifierInfo *>(DNT->getIdentifier()));
    if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
      return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
                                         const_cast<Type *>(T));

    // TODO: Set 'Template' parameter to true for other template types.
    return NestedNameSpecifier::Create(*this, nullptr, false,
                                       const_cast<Type *>(T));
  }

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifier are canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}

const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);

  if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
    return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
                                                CAT->getSizeExpr(),
                                                CAT->getSizeModifier(),
                                                CAT->getIndexTypeCVRQualifiers()));
  if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
    return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
                                                  IAT->getSizeModifier(),
                                                  IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
    return cast<ArrayType>(
        getDependentSizedArrayType(NewEltTy,
                                   DSAT->getSizeExpr(),
                                   DSAT->getSizeModifier(),
                                   DSAT->getIndexTypeCVRQualifiers(),
                                   DSAT->getBracketsRange()));

  const auto *VAT = cast<VariableArrayType>(ATy);
  return cast<ArrayType>(getVariableArrayType(NewEltTy,
                                              VAT->getSizeExpr(),
                                              VAT->getSizeModifier(),
                                              VAT->getIndexTypeCVRQualifiers(),
                                              VAT->getBracketsRange()));
}
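
// Illustrative example (editorial note, not from the upstream source): given
//   typedef int A[5];
//   const A x;
// getAsArrayType(x's type) returns a ConstantArrayType whose element type is
// 'const int', which is the C99 6.7.3p8 qualifier propagation performed above.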

QualType ASTContext::getAdjustedParameterType(QualType T) const {
  if (T->isArrayType() || T->isFunctionType())
    return getDecayedType(T);
  return T;
}

QualType ASTContext::getSignatureParameterType(QualType T) const {
  T = getVariableArrayDecayedType(T);
  T = getAdjustedParameterType(T);
  return T.getUnqualifiedType();
}

QualType ASTContext::getExceptionObjectType(QualType T) const {
  // C++ [except.throw]p3:
  //   A throw-expression initializes a temporary object, called the exception
  //   object, the type of which is determined by removing any top-level
  //   cv-qualifiers from the static type of the operand of throw and adjusting
  //   the type from "array of T" or "function returning T" to "pointer to T"
  //   or "pointer to function returning T", [...]
  T = getVariableArrayDecayedType(T);
  if (T->isArrayType() || T->isFunctionType())
    T = getDecayedType(T);
  return T.getUnqualifiedType();
}

/// getArrayDecayedType - Return the properly qualified result of decaying the
/// specified array type to a pointer. This operation is non-trivial when
/// handling typedefs etc. The canonical type of "T" must be an array type,
/// this returns a pointer to a properly qualified element of the array.
///
/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
QualType ASTContext::getArrayDecayedType(QualType Ty) const {
  // Get the element type with 'getAsArrayType' so that we don't lose any
  // typedefs in the element type of the array. This also handles propagation
  // of type qualifiers from the array type into the element type if present
  // (C99 6.7.3p8).
  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
  assert(PrettyArrayType && "Not an array type!");

  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());

  // int x[restrict 4] ->  int *restrict
  QualType Result = getQualifiedType(PtrTy,
                                     PrettyArrayType->getIndexTypeQualifiers());

  // int x[_Nullable] -> int * _Nullable
  if (auto Nullability = Ty->getNullability()) {
    Result = const_cast<ASTContext *>(this)->getAttributedType(
        AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
  }
  return Result;
}

QualType ASTContext::getBaseElementType(const ArrayType *array) const {
  return getBaseElementType(array->getElementType());
}

QualType ASTContext::getBaseElementType(QualType type) const {
  Qualifiers qs;
  while (true) {
    SplitQualType split = type.getSplitDesugaredType();
    const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
    if (!array)
      break;

    type = array->getElementType();
    qs.addConsistentQualifiers(split.Quals);
  }

  return getQualifiedType(type, qs);
}

/// getConstantArrayElementCount - Returns number of constant array elements.
uint64_t
ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
  uint64_t ElementCount = 1;
  do {
    ElementCount *= CA->getSize().getZExtValue();
    CA = dyn_cast_or_null<ConstantArrayType>(
        CA->getElementType()->getAsArrayTypeUnsafe());
  } while (CA);
  return ElementCount;
}

uint64_t ASTContext::getArrayInitLoopExprElementCount(
    const ArrayInitLoopExpr *AILE) const {
  if (!AILE)
    return 0;

  uint64_t ElementCount = 1;

  do {
    ElementCount *= AILE->getArraySize().getZExtValue();
    AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr());
  } while (AILE);

  return ElementCount;
}

/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
static FloatingRank getFloatingRank(QualType T) {
  if (const auto *CT = T->getAs<ComplexType>())
    return getFloatingRank(CT->getElementType());

  switch (T->castAs<BuiltinType>()->getKind()) {
  default: llvm_unreachable("getFloatingRank(): not a floating type");
  case BuiltinType::Float16:    return Float16Rank;
  case BuiltinType::Half:       return HalfRank;
  case BuiltinType::Float:      return FloatRank;
  case BuiltinType::Double:     return DoubleRank;
  case BuiltinType::LongDouble: return LongDoubleRank;
  case BuiltinType::Float128:   return Float128Rank;
  case BuiltinType::BFloat16:   return BFloat16Rank;
  case BuiltinType::Ibm128:     return Ibm128Rank;
  }
}

/// getFloatingTypeOrder - Compare the rank of the two specified floating
/// point types, ignoring the domain of the type (i.e. 'double' ==
/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
  FloatingRank LHSR = getFloatingRank(LHS);
  FloatingRank RHSR = getFloatingRank(RHS);

  if (LHSR == RHSR)
    return 0;
  if (LHSR > RHSR)
    return 1;
  return -1;
}

int ASTContext::getFloatingTypeSemanticOrder(QualType LHS,
                                             QualType RHS) const {
  if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS))
    return 0;
  return getFloatingTypeOrder(LHS, RHS);
}

/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  if (const auto *EIT = dyn_cast<BitIntType>(T))
    return 0 + (EIT->getNumBits() << 3);

  switch (cast<BuiltinType>(T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(Int128Ty) << 3);

  // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
  // their underlying types" [c++20 conv.rank]
  case BuiltinType::Char8:
    return getIntegerRank(UnsignedCharTy.getTypePtr());
  case BuiltinType::Char16:
    return getIntegerRank(
        getFromTargetType(Target->getChar16Type()).getTypePtr());
  case BuiltinType::Char32:
    return getIntegerRank(
        getFromTargetType(Target->getChar32Type()).getTypePtr());
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
    return getIntegerRank(
        getFromTargetType(Target->getWCharType()).getTypePtr());
  }
}
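
// Illustrative note (editorial note, not from the upstream source): the rank
// returned above packs the type's bit width into the high bits (width << 3)
// and a small per-kind tie-breaker into the low three bits. On a target where
// both 'int' and 'long' are 32 bits wide, 'long' therefore still ranks above
// 'int' (5 + (32 << 3) vs. 4 + (32 << 3)), as C99 6.3.1.1p1 requires.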

/// Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  if (E->isTypeDependent() || E->isValueDependent())
    return {};

  // C++ [conv.prom]p5:
  //    If the bit-field has an enumerated type, it is treated as any other
  //    value of that type for promotion purposes.
  if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
    return {};

  // FIXME: We should not do this unless E->refersToBitField() is true. This
  // matters in C where getSourceBitField() will find bit-fields for various
  // cases where the source expression is not a bit-field designator.

  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return {};

  QualType FT = Field->getType();

  uint64_t BitWidth = Field->getBitWidthValue(*this);
  uint64_t IntSize = getTypeSize(IntTy);
  // C++ [conv.prom]p5:
  //   A prvalue for an integral bit-field can be converted to a prvalue of type
  //   int if int can represent all the values of the bit-field; otherwise, it
  //   can be converted to unsigned int if unsigned int can represent all the
  //   values of the bit-field. If the bit-field is larger yet, no integral
  //   promotion applies to it.
  // C11 6.3.1.1/2:
  //   [For a bit-field of type _Bool, int, signed int, or unsigned int:]
  //   If an int can represent all values of the original type (as restricted by
  //   the width, for a bit-field), the value is converted to an int; otherwise,
  //   it is converted to an unsigned int.
  //
  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
  //        We perform that promotion here to match GCC and C++.
  // FIXME: C does not permit promotion of an enum bit-field whose rank is
  //        greater than that of 'int'. We perform that promotion to match GCC.
  if (BitWidth < IntSize)
    return IntTy;

  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Bit-fields wider than int are not subject to promotions, and therefore act
  // like the base type. GCC has some weird bugs in this area that we
  // deliberately do not follow (GCC follows a pre-standard resolution to
  // C's DR315 which treats bit-width as being part of the type, and this leaks
  // into their semantics in some cases).
  return {};
}
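
// Illustrative example (editorial note, not from the upstream source): given
//   struct S { unsigned long x : 3; };
// an rvalue use of s.x is reported as promotable to 'int' here, because the
// 3-bit width fits in int; this matches GCC and C++ even though C itself would
// not promote a 'long' bit-field (see the FIXMEs above).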

/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
  assert(!Promotable.isNull());
  assert(isPromotableIntegerType(Promotable));
  if (const auto *ET = Promotable->getAs<EnumType>())
    return ET->getDecl()->getPromotionType();

  if (const auto *BT = Promotable->getAs<BuiltinType>()) {
    // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
    // (3.9.1) can be converted to a prvalue of the first of the following
    // types that can represent all the values of its underlying type:
    // int, unsigned int, long int, unsigned long int, long long int, or
    // unsigned long long int [...]
    // FIXME: Is there some better way to compute this?
    if (BT->getKind() == BuiltinType::WChar_S ||
        BT->getKind() == BuiltinType::WChar_U ||
        BT->getKind() == BuiltinType::Char8 ||
        BT->getKind() == BuiltinType::Char16 ||
        BT->getKind() == BuiltinType::Char32) {
      bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
      uint64_t FromSize = getTypeSize(BT);
      QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
                                  LongLongTy, UnsignedLongLongTy };
      for (const auto &PT : PromoteTypes) {
        uint64_t ToSize = getTypeSize(PT);
        if (FromSize < ToSize ||
            (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
          return PT;
      }
      llvm_unreachable("char type should fit into long long");
    }
  }

  // At this point, we should have a signed or unsigned integer type.
  if (Promotable->isSignedIntegerType())
    return IntTy;
  uint64_t PromotableSize = getIntWidth(Promotable);
  uint64_t IntSize = getIntWidth(IntTy);
  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}
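
// Illustrative example (editorial note, not from the upstream source): on a
// target where wchar_t is a signed 32-bit type and int is 32 bits, wchar_t
// promotes to 'int' (the first listed type that can represent all of its
// values), while a plain 'unsigned short' falls through to the final check and
// also promotes to 'int', because its width is smaller than int's.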

/// Recurses in pointer/array types until it finds an objc retainable
/// type and returns its ownership.
Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
  while (!T.isNull()) {
    if (T.getObjCLifetime() != Qualifiers::OCL_None)
      return T.getObjCLifetime();
    if (T->isArrayType())
      T = getBaseElementType(T);
    else if (const auto *PT = T->getAs<PointerType>())
      T = PT->getPointeeType();
    else if (const auto *RT = T->getAs<ReferenceType>())
      T = RT->getPointeeType();
    else
      break;
  }

  return Qualifiers::OCL_None;
}

static const Type *getIntegerTypeForEnum(const EnumType *ET) {
  // Incomplete enum types are not treated as integer types.
  // FIXME: In C++, enum types are never integer types.
  if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
    return ET->getDecl()->getIntegerType().getTypePtr();
  return nullptr;
}

/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  if (const auto *ET = dyn_cast<EnumType>(LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const auto *ET = dyn_cast<EnumType>(RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(LHSC);
  unsigned RHSRank = getIntegerRank(RHSC);

  if (LHSUnsigned == RHSUnsigned) {  // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins.  Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins.  Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
  return 1;
}
*ASTContext::getCFConstantStringDecl() const {
7259 if (CFConstantStringTypeDecl
)
7260 return CFConstantStringTypeDecl
;
7262 assert(!CFConstantStringTagDecl
&&
7263 "tag and typedef should be initialized together");
7264 CFConstantStringTagDecl
= buildImplicitRecord("__NSConstantString_tag");
7265 CFConstantStringTagDecl
->startDefinition();
7275 /// typedef struct __NSConstantString_tag {
7278 /// const char *str;
7280 /// } __NSConstantString;
7282 /// Swift ABI (4.1, 4.2)
7284 /// typedef struct __NSConstantString_tag {
7285 /// uintptr_t _cfisa;
7286 /// uintptr_t _swift_rc;
7287 /// _Atomic(uint64_t) _cfinfoa;
7288 /// const char *_ptr;
7289 /// uint32_t _length;
7290 /// } __NSConstantString;
7294 /// typedef struct __NSConstantString_tag {
7295 /// uintptr_t _cfisa;
7296 /// uintptr_t _swift_rc;
7297 /// _Atomic(uint64_t) _cfinfoa;
7298 /// const char *_ptr;
7299 /// uintptr_t _length;
7300 /// } __NSConstantString;
7302 const auto CFRuntime
= getLangOpts().CFRuntime
;
7303 if (static_cast<unsigned>(CFRuntime
) <
7304 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift
)) {
7305 Fields
[Count
++] = { getPointerType(IntTy
.withConst()), "isa" };
7306 Fields
[Count
++] = { IntTy
, "flags" };
7307 Fields
[Count
++] = { getPointerType(CharTy
.withConst()), "str" };
7308 Fields
[Count
++] = { LongTy
, "length" };
7310 Fields
[Count
++] = { getUIntPtrType(), "_cfisa" };
7311 Fields
[Count
++] = { getUIntPtrType(), "_swift_rc" };
7312 Fields
[Count
++] = { getFromTargetType(Target
->getUInt64Type()), "_swift_rc" };
7313 Fields
[Count
++] = { getPointerType(CharTy
.withConst()), "_ptr" };
7314 if (CFRuntime
== LangOptions::CoreFoundationABI::Swift4_1
||
7315 CFRuntime
== LangOptions::CoreFoundationABI::Swift4_2
)
7316 Fields
[Count
++] = { IntTy
, "_ptr" };
7318 Fields
[Count
++] = { getUIntPtrType(), "_ptr" };
7322 for (unsigned i
= 0; i
< Count
; ++i
) {
7324 FieldDecl::Create(*this, CFConstantStringTagDecl
, SourceLocation(),
7325 SourceLocation(), &Idents
.get(Fields
[i
].Name
),
7326 Fields
[i
].Type
, /*TInfo=*/nullptr,
7327 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit
);
7328 Field
->setAccess(AS_public
);
7329 CFConstantStringTagDecl
->addDecl(Field
);
7332 CFConstantStringTagDecl
->completeDefinition();
7333 // This type is designed to be compatible with NSConstantString, but cannot
7334 // use the same name, since NSConstantString is an interface.
7335 auto tagType
= getTagDeclType(CFConstantStringTagDecl
);
7336 CFConstantStringTypeDecl
=
7337 buildImplicitTypedef(tagType
, "__NSConstantString");
7339 return CFConstantStringTypeDecl
;
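
// Illustrative note (editorial note, not from the upstream source): for the
// pre-Swift CoreFoundation ABIs the tag gets the classic four-field layout
// shown in the Objective-C comment above; for the Swift ABIs it gets the
// five-field layout, with the final "_length" field being 'int'-sized for
// Swift 4.1/4.2 and uintptr_t-sized for Swift 5.0.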

RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
  if (!CFConstantStringTagDecl)
    getCFConstantStringDecl(); // Build the tag and the typedef.
  return CFConstantStringTagDecl;
}

// getCFConstantStringType - Return the type used for constant CFStrings.
QualType ASTContext::getCFConstantStringType() const {
  return getTypedefType(getCFConstantStringDecl());
}

QualType ASTContext::getObjCSuperType() const {
  if (ObjCSuperType.isNull()) {
    RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
    getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl);
    ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
  }
  return ObjCSuperType;
}

void ASTContext::setCFConstantStringType(QualType T) {
  const auto *TD = T->castAs<TypedefType>();
  CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl());
  const auto *TagType =
      CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>();
  CFConstantStringTagDecl = TagType->getDecl();
}

QualType ASTContext::getBlockDescriptorType() const {
  if (BlockDescriptorType)
    return getTagDeclType(BlockDescriptorType);

  RecordDecl *RD;
  // FIXME: Needs the FlagAppleBlock bit.
  RD = buildImplicitRecord("__block_descriptor");
  RD->startDefinition();

  QualType FieldTypes[] = {
    UnsignedLongTy,
    UnsignedLongTy,
  };

  static const char *const FieldNames[] = {
    "reserved",
    "Size"
  };

  for (size_t i = 0; i < 2; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        *this, RD, SourceLocation(), SourceLocation(),
        &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    RD->addDecl(Field);
  }

  RD->completeDefinition();

  BlockDescriptorType = RD;

  return getTagDeclType(BlockDescriptorType);
}

QualType ASTContext::getBlockDescriptorExtendedType() const {
  if (BlockDescriptorExtendedType)
    return getTagDeclType(BlockDescriptorExtendedType);

  RecordDecl *RD;
  // FIXME: Needs the FlagAppleBlock bit.
  RD = buildImplicitRecord("__block_descriptor_withcopydispose");
  RD->startDefinition();

  QualType FieldTypes[] = {
    UnsignedLongTy,
    UnsignedLongTy,
    getPointerType(VoidPtrTy),
    getPointerType(VoidPtrTy)
  };

  static const char *const FieldNames[] = {
    "reserved",
    "Size",
    "CopyFuncPtr",
    "DestroyFuncPtr"
  };

  for (size_t i = 0; i < 4; ++i) {
    FieldDecl *Field = FieldDecl::Create(
        *this, RD, SourceLocation(), SourceLocation(),
        &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr,
        /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    RD->addDecl(Field);
  }

  RD->completeDefinition();

  BlockDescriptorExtendedType = RD;
  return getTagDeclType(BlockDescriptorExtendedType);
}

OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
  const auto *BT = dyn_cast<BuiltinType>(T);

  if (!BT) {
    if (isa<PipeType>(T))
      return OCLTK_Pipe;

    return OCLTK_Default;
  }

  switch (BT->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"

  case BuiltinType::OCLClkEvent:
    return OCLTK_ClkEvent;

  case BuiltinType::OCLEvent:
    return OCLTK_Event;

  case BuiltinType::OCLQueue:
    return OCLTK_Queue;

  case BuiltinType::OCLReserveID:
    return OCLTK_ReserveID;

  case BuiltinType::OCLSampler:
    return OCLTK_Sampler;

  default:
    return OCLTK_Default;
  }
}

LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
  return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
}

/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr();
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  // The block needs copy/destroy helpers if Ty is non-trivial to destructively
  // move or destroy.
  if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
    return true;

  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    switch (lifetime) {
    case Qualifiers::OCL_None: llvm_unreachable("impossible");

    // These are just bits as far as the runtime is concerned.
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      return false;

    // These cases should have been taken care of when checking the type's
    // otherwise-computed lifetime.
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Strong:
      llvm_unreachable("impossible");
    }
    llvm_unreachable("fell out of lifetime switch!");
  }
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}
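
// Illustrative example (editorial note, not from the upstream source): a
// __block variable of C++ class type with a non-trivial destructor takes the
// early record path above and requires copy/dispose helpers, while a __block
// 'int' requires none (it is neither ObjC-retainable nor non-trivially
// destructible).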

bool ASTContext::getByrefLifetime(QualType Ty,
                                  Qualifiers::ObjCLifetime &LifeTime,
                                  bool &HasByrefExtendedLayout) const {
  if (!getLangOpts().ObjC ||
      getLangOpts().getGC() != LangOptions::NonGC)
    return false;

  HasByrefExtendedLayout = false;
  if (Ty->isRecordType()) {
    HasByrefExtendedLayout = true;
    LifeTime = Qualifiers::OCL_None;
  } else if ((LifeTime = Ty.getObjCLifetime())) {
    // Honor the ARC qualifiers.
  } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
    LifeTime = Qualifiers::OCL_ExplicitNone;
  } else {
    LifeTime = Qualifiers::OCL_None;
  }
  return true;
}

CanQualType ASTContext::getNSUIntegerType() const {
  assert(Target && "Expected target to be initialized");
  const llvm::Triple &T = Target->getTriple();
  // Windows is LLP64 rather than LP64
  if (T.isOSWindows() && T.isArch64Bit())
    return UnsignedLongLongTy;
  return UnsignedLongTy;
}

CanQualType ASTContext::getNSIntegerType() const {
  assert(Target && "Expected target to be initialized");
  const llvm::Triple &T = Target->getTriple();
  // Windows is LLP64 rather than LP64
  if (T.isOSWindows() && T.isArch64Bit())
    return LongLongTy;
  return LongTy;
}
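
// Illustrative note (editorial note, not from the upstream source): on 64-bit
// Windows (LLP64) NSUInteger/NSInteger map to 'unsigned long long'/'long long'
// above, while on LP64 targets they map to 'unsigned long'/'long'.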

TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
  if (!ObjCInstanceTypeDecl)
    ObjCInstanceTypeDecl =
        buildImplicitTypedef(getObjCIdType(), "instancetype");
  return ObjCInstanceTypeDecl;
}

// This returns true if a type has been typedefed to BOOL:
// typedef <type> BOOL;
static bool isTypeTypedefedAsBOOL(QualType T) {
  if (const auto *TT = dyn_cast<TypedefType>(T))
    if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
      return II->isStr("BOOL");

  return false;
}

/// getObjCEncodingTypeSize returns size of type for objective-c encoding
/// purpose.
CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
  if (!type->isIncompleteArrayType() && type->isIncompleteType())
    return CharUnits::Zero();

  CharUnits sz = getTypeSizeInChars(type);

  // Make all integer and enum types at least as large as an int
  if (sz.isPositive() && type->isIntegralOrEnumerationType())
    sz = std::max(sz, getTypeSizeInChars(IntTy));
  // Treat arrays as pointers, since that's how they're passed in.
  else if (type->isArrayType())
    sz = getTypeSizeInChars(VoidPtrTy);
  return sz;
}

bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
  return getTargetInfo().getCXXABI().isMicrosoft() &&
         VD->isStaticDataMember() &&
         VD->getType()->isIntegralOrEnumerationType() &&
         !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
}

ASTContext::InlineVariableDefinitionKind
ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
  if (!VD->isInline())
    return InlineVariableDefinitionKind::None;

  // In almost all cases, it's a weak definition.
  auto *First = VD->getFirstDecl();
  if (First->isInlineSpecified() || !First->isStaticDataMember())
    return InlineVariableDefinitionKind::Weak;

  // If there's a file-context declaration in this translation unit, it's a
  // non-discardable definition.
  for (auto *D : VD->redecls())
    if (D->getLexicalDeclContext()->isFileContext() &&
        !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
      return InlineVariableDefinitionKind::Strong;

  // If we've not seen one yet, we don't know.
  return InlineVariableDefinitionKind::WeakUnknown;
}

static std::string charUnitsToString(const CharUnits &CU) {
  return llvm::itostr(CU.getQuantity());
}

7633 /// getObjCEncodingForBlock - Return the encoded type for this block
7635 std::string
ASTContext::getObjCEncodingForBlock(const BlockExpr
*Expr
) const {
7638 const BlockDecl
*Decl
= Expr
->getBlockDecl();
7640 Expr
->getType()->castAs
<BlockPointerType
>()->getPointeeType();
7641 QualType BlockReturnTy
= BlockTy
->castAs
<FunctionType
>()->getReturnType();
7642 // Encode result type.
7643 if (getLangOpts().EncodeExtendedBlockSig
)
7644 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None
, BlockReturnTy
, S
,
7647 getObjCEncodingForType(BlockReturnTy
, S
);
7648 // Compute size of all parameters.
7649 // Start with computing size of a pointer in number of bytes.
7650 // FIXME: There might(should) be a better way of doing this computation!
7651 CharUnits PtrSize
= getTypeSizeInChars(VoidPtrTy
);
7652 CharUnits ParmOffset
= PtrSize
;
7653 for (auto *PI
: Decl
->parameters()) {
7654 QualType PType
= PI
->getType();
7655 CharUnits sz
= getObjCEncodingTypeSize(PType
);
7658 assert(sz
.isPositive() && "BlockExpr - Incomplete param type");
7661 // Size of the argument frame
7662 S
+= charUnitsToString(ParmOffset
);
7663 // Block pointer and offset.
7667 ParmOffset
= PtrSize
;
7668 for (auto *PVDecl
: Decl
->parameters()) {
7669 QualType PType
= PVDecl
->getOriginalType();
7670 if (const auto *AT
=
7671 dyn_cast
<ArrayType
>(PType
->getCanonicalTypeInternal())) {
7672 // Use array's original type only if it has known number of
7674 if (!isa
<ConstantArrayType
>(AT
))
7675 PType
= PVDecl
->getType();
7676 } else if (PType
->isFunctionType())
7677 PType
= PVDecl
->getType();
7678 if (getLangOpts().EncodeExtendedBlockSig
)
7679 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None
, PType
,
7680 S
, true /*Extended*/);
7682 getObjCEncodingForType(PType
, S
);
7683 S
+= charUnitsToString(ParmOffset
);
7684 ParmOffset
+= getObjCEncodingTypeSize(PType
);
7691 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl
*Decl
) const {
7693 // Encode result type.
7694 getObjCEncodingForType(Decl
->getReturnType(), S
);
7695 CharUnits ParmOffset
;
7696 // Compute size of all parameters.
7697 for (auto *PI
: Decl
->parameters()) {
7698 QualType PType
= PI
->getType();
7699 CharUnits sz
= getObjCEncodingTypeSize(PType
);
7703 assert(sz
.isPositive() &&
7704 "getObjCEncodingForFunctionDecl - Incomplete param type");
7707 S
+= charUnitsToString(ParmOffset
);
7708 ParmOffset
= CharUnits::Zero();
7711 for (auto *PVDecl
: Decl
->parameters()) {
7712 QualType PType
= PVDecl
->getOriginalType();
7713 if (const auto *AT
=
7714 dyn_cast
<ArrayType
>(PType
->getCanonicalTypeInternal())) {
7715 // Use array's original type only if it has known number of
7717 if (!isa
<ConstantArrayType
>(AT
))
7718 PType
= PVDecl
->getType();
7719 } else if (PType
->isFunctionType())
7720 PType
= PVDecl
->getType();
7721 getObjCEncodingForType(PType
, S
);
7722 S
+= charUnitsToString(ParmOffset
);
7723 ParmOffset
+= getObjCEncodingTypeSize(PType
);
7729 /// getObjCEncodingForMethodParameter - Return the encoded type for a single
7730 /// method parameter or return type. If Extended, include class names and
7731 /// block object types.
7732 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT
,
7733 QualType T
, std::string
& S
,
7734 bool Extended
) const {
7735 // Encode type qualifier, 'in', 'inout', etc. for the parameter.
7736 getObjCEncodingForTypeQualifier(QT
, S
);
7737 // Encode parameter type.
7738 ObjCEncOptions Options
= ObjCEncOptions()
7739 .setExpandPointedToStructures()
7740 .setExpandStructures()
7741 .setIsOutermostType();
7743 Options
.setEncodeBlockParameters().setEncodeClassNames();
7744 getObjCEncodingForTypeImpl(T
, S
, Options
, /*Field=*/nullptr);
7747 /// getObjCEncodingForMethodDecl - Return the encoded type for this method
7749 std::string
ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl
*Decl
,
7750 bool Extended
) const {
7751 // FIXME: This is not very efficient.
7752 // Encode return type.
7754 getObjCEncodingForMethodParameter(Decl
->getObjCDeclQualifier(),
7755 Decl
->getReturnType(), S
, Extended
);
7756 // Compute size of all parameters.
7757 // Start with computing size of a pointer in number of bytes.
7758 // FIXME: There might(should) be a better way of doing this computation!
7759 CharUnits PtrSize
= getTypeSizeInChars(VoidPtrTy
);
7760 // The first two arguments (self and _cmd) are pointers; account for
7762 CharUnits ParmOffset
= 2 * PtrSize
;
7763 for (ObjCMethodDecl::param_const_iterator PI
= Decl
->param_begin(),
7764 E
= Decl
->sel_param_end(); PI
!= E
; ++PI
) {
7765 QualType PType
= (*PI
)->getType();
7766 CharUnits sz
= getObjCEncodingTypeSize(PType
);
7770 assert(sz
.isPositive() &&
7771 "getObjCEncodingForMethodDecl - Incomplete param type");
7774 S
+= charUnitsToString(ParmOffset
);
7776 S
+= charUnitsToString(PtrSize
);
7779 ParmOffset
= 2 * PtrSize
;
7780 for (ObjCMethodDecl::param_const_iterator PI
= Decl
->param_begin(),
7781 E
= Decl
->sel_param_end(); PI
!= E
; ++PI
) {
7782 const ParmVarDecl
*PVDecl
= *PI
;
7783 QualType PType
= PVDecl
->getOriginalType();
7784 if (const auto *AT
=
7785 dyn_cast
<ArrayType
>(PType
->getCanonicalTypeInternal())) {
7786 // Use array's original type only if it has known number of
7788 if (!isa
<ConstantArrayType
>(AT
))
7789 PType
= PVDecl
->getType();
7790 } else if (PType
->isFunctionType())
7791 PType
= PVDecl
->getType();
7792 getObjCEncodingForMethodParameter(PVDecl
->getObjCDeclQualifier(),
7793 PType
, S
, Extended
);
7794 S
+= charUnitsToString(ParmOffset
);
7795 ParmOffset
+= getObjCEncodingTypeSize(PType
);
7801 ObjCPropertyImplDecl
*
7802 ASTContext::getObjCPropertyImplDeclForPropertyDecl(
7803 const ObjCPropertyDecl
*PD
,
7804 const Decl
*Container
) const {
7807 if (const auto *CID
= dyn_cast
<ObjCCategoryImplDecl
>(Container
)) {
7808 for (auto *PID
: CID
->property_impls())
7809 if (PID
->getPropertyDecl() == PD
)
7812 const auto *OID
= cast
<ObjCImplementationDecl
>(Container
);
7813 for (auto *PID
: OID
->property_impls())
7814 if (PID
->getPropertyDecl() == PD
)
7820 /// getObjCEncodingForPropertyDecl - Return the encoded type for this
7821 /// property declaration. If non-NULL, Container must be either an
7822 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
7823 /// NULL when getting encodings for protocol properties.
7824 /// Property attributes are stored as a comma-delimited C string. The simple
7825 /// attributes readonly and bycopy are encoded as single characters. The
7826 /// parametrized attributes, getter=name, setter=name, and ivar=name, are
7827 /// encoded as single characters, followed by an identifier. Property types
7828 /// are also encoded as a parametrized attribute. The characters used to encode
7829 /// these attributes are defined by the following enumeration:
7831 /// enum PropertyAttributes {
7832 /// kPropertyReadOnly = 'R', // property is read-only.
7833 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned
7834 /// kPropertyByref = '&', // property is a reference to the value last assigned
7835 /// kPropertyDynamic = 'D', // property is dynamic
7836 /// kPropertyGetter = 'G', // followed by getter selector name
7837 /// kPropertySetter = 'S', // followed by setter selector name
7838 /// kPropertyInstanceVariable = 'V' // followed by instance variable name
7839 /// kPropertyType = 'T' // followed by old-style type encoding.
7840 /// kPropertyWeak = 'W' // 'weak' property
7841 /// kPropertyStrong = 'P' // property GC'able
7842 /// kPropertyNonAtomic = 'N' // property non-atomic
7843 /// kPropertyOptional = '?' // property optional
7847 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl
*PD
,
7848 const Decl
*Container
) const {
7849 // Collect information from the property implementation decl(s).
7850 bool Dynamic
= false;
7851 ObjCPropertyImplDecl
*SynthesizePID
= nullptr;
7853 if (ObjCPropertyImplDecl
*PropertyImpDecl
=
7854 getObjCPropertyImplDeclForPropertyDecl(PD
, Container
)) {
7855 if (PropertyImpDecl
->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic
)
7858 SynthesizePID
= PropertyImpDecl
;
7861 // FIXME: This is not very efficient.
7862 std::string S
= "T";
7864 // Encode result type.
7865 // GCC has some special rules regarding encoding of properties which
7866 // closely resembles encoding of ivars.
7867 getObjCEncodingForPropertyType(PD
->getType(), S
);
7869 if (PD
->isOptional())
7872 if (PD
->isReadOnly()) {
7874 if (PD
->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy
)
7876 if (PD
->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain
)
7878 if (PD
->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak
)
7881 switch (PD
->getSetterKind()) {
7882 case ObjCPropertyDecl::Assign
: break;
7883 case ObjCPropertyDecl::Copy
: S
+= ",C"; break;
7884 case ObjCPropertyDecl::Retain
: S
+= ",&"; break;
7885 case ObjCPropertyDecl::Weak
: S
+= ",W"; break;
7889 // It really isn't clear at all what this means, since properties
7890 // are "dynamic by default".
7894 if (PD
->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic
)
7897 if (PD
->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter
) {
7899 S
+= PD
->getGetterName().getAsString();
7902 if (PD
->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter
) {
7904 S
+= PD
->getSetterName().getAsString();
7907 if (SynthesizePID
) {
7908 const ObjCIvarDecl
*OID
= SynthesizePID
->getPropertyIvarDecl();
7910 S
+= OID
->getNameAsString();
7913 // FIXME: OBJCGC: weak & strong
7917 /// getLegacyIntegralTypeEncoding -
7918 /// Another legacy compatibility encoding: 32-bit longs are encoded as
7919 /// 'l' or 'L' , but not always. For typedefs, we need to use
7920 /// 'i' or 'I' instead if encoding a struct field, or a pointer!
7921 void ASTContext::getLegacyIntegralTypeEncoding (QualType
&PointeeTy
) const {
7922 if (PointeeTy
->getAs
<TypedefType
>()) {
7923 if (const auto *BT
= PointeeTy
->getAs
<BuiltinType
>()) {
7924 if (BT
->getKind() == BuiltinType::ULong
&& getIntWidth(PointeeTy
) == 32)
7925 PointeeTy
= UnsignedIntTy
;
7927 if (BT
->getKind() == BuiltinType::Long
&& getIntWidth(PointeeTy
) == 32)
7933 void ASTContext::getObjCEncodingForType(QualType T
, std::string
& S
,
7934 const FieldDecl
*Field
,
7935 QualType
*NotEncodedT
) const {
7936 // We follow the behavior of gcc, expanding structures which are
7937 // directly pointed to, and expanding embedded structures. Note that
7938 // these rules are sufficient to prevent recursive encoding of the
7940 getObjCEncodingForTypeImpl(T
, S
,
7942 .setExpandPointedToStructures()
7943 .setExpandStructures()
7944 .setIsOutermostType(),
7945 Field
, NotEncodedT
);
7948 void ASTContext::getObjCEncodingForPropertyType(QualType T
,
7949 std::string
& S
) const {
7950 // Encode result type.
7951 // GCC has some special rules regarding encoding of properties which
7952 // closely resembles encoding of ivars.
7953 getObjCEncodingForTypeImpl(T
, S
,
7955 .setExpandPointedToStructures()
7956 .setExpandStructures()
7957 .setIsOutermostType()
7958 .setEncodingProperty(),
7962 static char getObjCEncodingForPrimitiveType(const ASTContext
*C
,
7963 const BuiltinType
*BT
) {
7964 BuiltinType::Kind kind
= BT
->getKind();
7966 case BuiltinType::Void
: return 'v';
7967 case BuiltinType::Bool
: return 'B';
7968 case BuiltinType::Char8
:
7969 case BuiltinType::Char_U
:
7970 case BuiltinType::UChar
: return 'C';
7971 case BuiltinType::Char16
:
7972 case BuiltinType::UShort
: return 'S';
7973 case BuiltinType::Char32
:
7974 case BuiltinType::UInt
: return 'I';
7975 case BuiltinType::ULong
:
7976 return C
->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
7977 case BuiltinType::UInt128
: return 'T';
7978 case BuiltinType::ULongLong
: return 'Q';
7979 case BuiltinType::Char_S
:
7980 case BuiltinType::SChar
: return 'c';
7981 case BuiltinType::Short
: return 's';
7982 case BuiltinType::WChar_S
:
7983 case BuiltinType::WChar_U
:
7984 case BuiltinType::Int
: return 'i';
7985 case BuiltinType::Long
:
7986 return C
->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
7987 case BuiltinType::LongLong
: return 'q';
7988 case BuiltinType::Int128
: return 't';
7989 case BuiltinType::Float
: return 'f';
7990 case BuiltinType::Double
: return 'd';
7991 case BuiltinType::LongDouble
: return 'D';
7992 case BuiltinType::NullPtr
: return '*'; // like char*
7994 case BuiltinType::BFloat16
:
7995 case BuiltinType::Float16
:
7996 case BuiltinType::Float128
:
7997 case BuiltinType::Ibm128
:
7998 case BuiltinType::Half
:
7999 case BuiltinType::ShortAccum
:
8000 case BuiltinType::Accum
:
8001 case BuiltinType::LongAccum
:
8002 case BuiltinType::UShortAccum
:
8003 case BuiltinType::UAccum
:
8004 case BuiltinType::ULongAccum
:
8005 case BuiltinType::ShortFract
:
8006 case BuiltinType::Fract
:
8007 case BuiltinType::LongFract
:
8008 case BuiltinType::UShortFract
:
8009 case BuiltinType::UFract
:
8010 case BuiltinType::ULongFract
:
8011 case BuiltinType::SatShortAccum
:
8012 case BuiltinType::SatAccum
:
8013 case BuiltinType::SatLongAccum
:
8014 case BuiltinType::SatUShortAccum
:
8015 case BuiltinType::SatUAccum
:
8016 case BuiltinType::SatULongAccum
:
8017 case BuiltinType::SatShortFract
:
8018 case BuiltinType::SatFract
:
8019 case BuiltinType::SatLongFract
:
8020 case BuiltinType::SatUShortFract
:
8021 case BuiltinType::SatUFract
:
8022 case BuiltinType::SatULongFract
:
8023 // FIXME: potentially need @encodes for these!
8026 #define SVE_TYPE(Name, Id, SingletonId) \
8027 case BuiltinType::Id:
8028 #include "clang/Basic/AArch64SVEACLETypes.def"
8029 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
8030 #include "clang/Basic/RISCVVTypes.def"
8031 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
8032 #include "clang/Basic/WebAssemblyReferenceTypes.def"
8034 DiagnosticsEngine
&Diags
= C
->getDiagnostics();
8035 unsigned DiagID
= Diags
.getCustomDiagID(DiagnosticsEngine::Error
,
8036 "cannot yet @encode type %0");
8037 Diags
.Report(DiagID
) << BT
->getName(C
->getPrintingPolicy());
8041 case BuiltinType::ObjCId
:
8042 case BuiltinType::ObjCClass
:
8043 case BuiltinType::ObjCSel
:
8044 llvm_unreachable("@encoding ObjC primitive type");
8046 // OpenCL and placeholder types don't need @encodings.
8047 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
8048 case BuiltinType::Id:
8049 #include "clang/Basic/OpenCLImageTypes.def"
8050 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
8051 case BuiltinType::Id:
8052 #include "clang/Basic/OpenCLExtensionTypes.def"
8053 case BuiltinType::OCLEvent
:
8054 case BuiltinType::OCLClkEvent
:
8055 case BuiltinType::OCLQueue
:
8056 case BuiltinType::OCLReserveID
:
8057 case BuiltinType::OCLSampler
:
8058 case BuiltinType::Dependent
:
8059 #define PPC_VECTOR_TYPE(Name, Id, Size) \
8060 case BuiltinType::Id:
8061 #include "clang/Basic/PPCTypes.def"
8062 #define BUILTIN_TYPE(KIND, ID)
8063 #define PLACEHOLDER_TYPE(KIND, ID) \
8064 case BuiltinType::KIND:
8065 #include "clang/AST/BuiltinTypes.def"
8066 llvm_unreachable("invalid builtin type for @encode");
8068 llvm_unreachable("invalid BuiltinType::Kind value");
static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
  EnumDecl *Enum = ET->getDecl();

  // The encoding of a non-fixed enum type is always 'i', regardless of size.
  if (!Enum->isFixed())
    return 'i';

  // The encoding of a fixed enum type matches its fixed underlying type.
  const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
  return getObjCEncodingForPrimitiveType(C, BT);
}
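
// Example of the rule above: an unscoped, non-fixed 'enum Color { Red };'
// encodes as "i", while a fixed enum such as 'enum Small : unsigned char { A };'
// encodes as "C", the encoding of its underlying type.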
static void EncodeBitField(const ASTContext *Ctx, std::string& S,
                           QualType T, const FieldDecl *FD) {
  assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
  S += 'b';
  // The NeXT runtime encodes bit fields as b followed by the number of bits.
  // The GNU runtime requires more information; bitfields are encoded as b,
  // then the offset (in bits) of the first element, then the type of the
  // bitfield, then the size in bits. For example, in this structure:
  //
  // struct
  // {
  //    int integer;
  //    int flags:2;
  // };
  //
  // On a 32-bit system, the encoding for flags would be b2 for the NeXT
  // runtime, but b32i2 for the GNU runtime. The reason for this extra
  // information is not especially sensible, but we're stuck with it for
  // compatibility with GCC, although providing it breaks anything that
  // actually uses runtime introspection and wants to work on both runtimes...
  if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
    uint64_t Offset;

    if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) {
      Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr,
                                         IVD);
    } else {
      const RecordDecl *RD = FD->getParent();
      const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
      Offset = RL.getFieldOffset(FD->getFieldIndex());
    }

    S += llvm::utostr(Offset);

    if (const auto *ET = T->getAs<EnumType>())
      S += ObjCEncodingForEnumType(Ctx, ET);
    else {
      const auto *BT = T->castAs<BuiltinType>();
      S += getObjCEncodingForPrimitiveType(Ctx, BT);
    }
  }
  S += llvm::utostr(FD->getBitWidthValue(*Ctx));
}
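
// Worked example of the two encodings above, assuming a field declared as
// 'unsigned char b : 3;' at bit offset 0: the NeXT runtime emits "b3", while
// the GNU family emits "b0C3" (offset 0, element type 'C', width 3).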
// Helper function for determining whether the encoded type string would include
// a template specialization type.
static bool hasTemplateSpecializationInEncodedString(const Type *T,
                                                     bool VisitBasesAndFields) {
  T = T->getBaseElementTypeUnsafe();

  if (auto *PT = T->getAs<PointerType>())
    return hasTemplateSpecializationInEncodedString(
        PT->getPointeeType().getTypePtr(), false);

  auto *CXXRD = T->getAsCXXRecordDecl();

  if (!CXXRD)
    return false;

  if (isa<ClassTemplateSpecializationDecl>(CXXRD))
    return true;

  if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
    return false;

  for (const auto &B : CXXRD->bases())
    if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(),
                                                 true))
      return true;

  for (auto *FD : CXXRD->fields())
    if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(),
                                                 true))
      return true;

  return false;
}
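
// Example of what this helper detects: a field or base of type
// 'std::vector<int>' is a ClassTemplateSpecializationDecl, so this returns
// true; the caller below then encodes a pointer to such a class simply as
// "^v" unless LangOpts.EncodeCXXClassTemplateSpec is enabled.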
8160 // FIXME: Use SmallString for accumulating string.
8161 void ASTContext::getObjCEncodingForTypeImpl(QualType T
, std::string
&S
,
8162 const ObjCEncOptions Options
,
8163 const FieldDecl
*FD
,
8164 QualType
*NotEncodedT
) const {
8165 CanQualType CT
= getCanonicalType(T
);
8166 switch (CT
->getTypeClass()) {
8169 if (FD
&& FD
->isBitField())
8170 return EncodeBitField(this, S
, T
, FD
);
8171 if (const auto *BT
= dyn_cast
<BuiltinType
>(CT
))
8172 S
+= getObjCEncodingForPrimitiveType(this, BT
);
8174 S
+= ObjCEncodingForEnumType(this, cast
<EnumType
>(CT
));
8179 getObjCEncodingForTypeImpl(T
->castAs
<ComplexType
>()->getElementType(), S
,
8186 getObjCEncodingForTypeImpl(T
->castAs
<AtomicType
>()->getValueType(), S
,
8191 // encoding for pointer or reference types.
8193 case Type::LValueReference
:
8194 case Type::RValueReference
: {
8196 if (isa
<PointerType
>(CT
)) {
8197 const auto *PT
= T
->castAs
<PointerType
>();
8198 if (PT
->isObjCSelType()) {
8202 PointeeTy
= PT
->getPointeeType();
8204 PointeeTy
= T
->castAs
<ReferenceType
>()->getPointeeType();
8207 bool isReadOnly
= false;
8208 // For historical/compatibility reasons, the read-only qualifier of the
8209 // pointee gets emitted _before_ the '^'. The read-only qualifier of
8210 // the pointer itself gets ignored, _unless_ we are looking at a typedef!
8211 // Also, do not emit the 'r' for anything but the outermost type!
8212 if (T
->getAs
<TypedefType
>()) {
8213 if (Options
.IsOutermostType() && T
.isConstQualified()) {
8217 } else if (Options
.IsOutermostType()) {
8218 QualType P
= PointeeTy
;
8219 while (auto PT
= P
->getAs
<PointerType
>())
8220 P
= PT
->getPointeeType();
8221 if (P
.isConstQualified()) {
8227 // Another legacy compatibility encoding. Some ObjC qualifier and type
8228 // combinations need to be rearranged.
    // Rewrite "in const" from "nr" to "rn"
    if (StringRef(S).endswith("nr"))
      S.replace(S.end()-2, S.end(), "rn");
8234 if (PointeeTy
->isCharType()) {
8235 // char pointer types should be encoded as '*' unless it is a
8236 // type that has been typedef'd to 'BOOL'.
8237 if (!isTypeTypedefedAsBOOL(PointeeTy
)) {
8241 } else if (const auto *RTy
= PointeeTy
->getAs
<RecordType
>()) {
8242 // GCC binary compat: Need to convert "struct objc_class *" to "#".
8243 if (RTy
->getDecl()->getIdentifier() == &Idents
.get("objc_class")) {
8247 // GCC binary compat: Need to convert "struct objc_object *" to "@".
8248 if (RTy
->getDecl()->getIdentifier() == &Idents
.get("objc_object")) {
8252 // If the encoded string for the class includes template names, just emit
8253 // "^v" for pointers to the class.
8254 if (getLangOpts().CPlusPlus
&&
8255 (!getLangOpts().EncodeCXXClassTemplateSpec
&&
8256 hasTemplateSpecializationInEncodedString(
8257 RTy
, Options
.ExpandPointedToStructures()))) {
8264 getLegacyIntegralTypeEncoding(PointeeTy
);
8266 ObjCEncOptions NewOptions
;
8267 if (Options
.ExpandPointedToStructures())
8268 NewOptions
.setExpandStructures();
8269 getObjCEncodingForTypeImpl(PointeeTy
, S
, NewOptions
,
8270 /*Field=*/nullptr, NotEncodedT
);
8274 case Type::ConstantArray
:
8275 case Type::IncompleteArray
:
8276 case Type::VariableArray
: {
8277 const auto *AT
= cast
<ArrayType
>(CT
);
8279 if (isa
<IncompleteArrayType
>(AT
) && !Options
.IsStructField()) {
8280 // Incomplete arrays are encoded as a pointer to the array element.
8283 getObjCEncodingForTypeImpl(
8284 AT
->getElementType(), S
,
8285 Options
.keepingOnly(ObjCEncOptions().setExpandStructures()), FD
);
8289 if (const auto *CAT
= dyn_cast
<ConstantArrayType
>(AT
))
8290 S
+= llvm::utostr(CAT
->getSize().getZExtValue());
8292 //Variable length arrays are encoded as a regular array with 0 elements.
8293 assert((isa
<VariableArrayType
>(AT
) || isa
<IncompleteArrayType
>(AT
)) &&
8294 "Unknown array type!");
8298 getObjCEncodingForTypeImpl(
8299 AT
->getElementType(), S
,
8300 Options
.keepingOnly(ObjCEncOptions().setExpandStructures()), FD
,
8307 case Type::FunctionNoProto
:
8308 case Type::FunctionProto
:
8312 case Type::Record
: {
8313 RecordDecl
*RDecl
= cast
<RecordType
>(CT
)->getDecl();
8314 S
+= RDecl
->isUnion() ? '(' : '{';
8315 // Anonymous structures print as '?'
8316 if (const IdentifierInfo
*II
= RDecl
->getIdentifier()) {
8318 if (const auto *Spec
= dyn_cast
<ClassTemplateSpecializationDecl
>(RDecl
)) {
8319 const TemplateArgumentList
&TemplateArgs
= Spec
->getTemplateArgs();
8320 llvm::raw_string_ostream
OS(S
);
8321 printTemplateArgumentList(OS
, TemplateArgs
.asArray(),
8322 getPrintingPolicy());
8327 if (Options
.ExpandStructures()) {
8329 if (!RDecl
->isUnion()) {
8330 getObjCEncodingForStructureImpl(RDecl
, S
, FD
, true, NotEncodedT
);
8332 for (const auto *Field
: RDecl
->fields()) {
8335 S
+= Field
->getNameAsString();
8339 // Special case bit-fields.
8340 if (Field
->isBitField()) {
8341 getObjCEncodingForTypeImpl(Field
->getType(), S
,
8342 ObjCEncOptions().setExpandStructures(),
8345 QualType qt
= Field
->getType();
8346 getLegacyIntegralTypeEncoding(qt
);
8347 getObjCEncodingForTypeImpl(
8349 ObjCEncOptions().setExpandStructures().setIsStructField(), FD
,
8355 S
+= RDecl
->isUnion() ? ')' : '}';
8359 case Type::BlockPointer
: {
8360 const auto *BT
= T
->castAs
<BlockPointerType
>();
8361 S
+= "@?"; // Unlike a pointer-to-function, which is "^?".
8362 if (Options
.EncodeBlockParameters()) {
8363 const auto *FT
= BT
->getPointeeType()->castAs
<FunctionType
>();
8366 // Block return type
8367 getObjCEncodingForTypeImpl(FT
->getReturnType(), S
,
8368 Options
.forComponentType(), FD
, NotEncodedT
);
8372 if (const auto *FPT
= dyn_cast
<FunctionProtoType
>(FT
)) {
8373 for (const auto &I
: FPT
->param_types())
8374 getObjCEncodingForTypeImpl(I
, S
, Options
.forComponentType(), FD
,
8382 case Type::ObjCObject
: {
8383 // hack to match legacy encoding of *id and *Class
8384 QualType Ty
= getObjCObjectPointerType(CT
);
8385 if (Ty
->isObjCIdType()) {
8386 S
+= "{objc_object=}";
8389 else if (Ty
->isObjCClassType()) {
8390 S
+= "{objc_class=}";
8393 // TODO: Double check to make sure this intentionally falls through.
8397 case Type::ObjCInterface
: {
8398 // Ignore protocol qualifiers when mangling at this level.
8399 // @encode(class_name)
8400 ObjCInterfaceDecl
*OI
= T
->castAs
<ObjCObjectType
>()->getInterface();
8402 S
+= OI
->getObjCRuntimeNameAsString();
8403 if (Options
.ExpandStructures()) {
8405 SmallVector
<const ObjCIvarDecl
*, 32> Ivars
;
8406 DeepCollectObjCIvars(OI
, true, Ivars
);
8407 for (unsigned i
= 0, e
= Ivars
.size(); i
!= e
; ++i
) {
8408 const FieldDecl
*Field
= Ivars
[i
];
8409 if (Field
->isBitField())
8410 getObjCEncodingForTypeImpl(Field
->getType(), S
,
8411 ObjCEncOptions().setExpandStructures(),
8414 getObjCEncodingForTypeImpl(Field
->getType(), S
,
8415 ObjCEncOptions().setExpandStructures(), FD
,
8423 case Type::ObjCObjectPointer
: {
8424 const auto *OPT
= T
->castAs
<ObjCObjectPointerType
>();
8425 if (OPT
->isObjCIdType()) {
8430 if (OPT
->isObjCClassType() || OPT
->isObjCQualifiedClassType()) {
8431 // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
8432 // Since this is a binary compatibility issue, need to consult with
8433 // runtime folks. Fortunately, this is a *very* obscure construct.
8438 if (OPT
->isObjCQualifiedIdType()) {
8439 getObjCEncodingForTypeImpl(
8441 Options
.keepingOnly(ObjCEncOptions()
8442 .setExpandPointedToStructures()
8443 .setExpandStructures()),
8445 if (FD
|| Options
.EncodingProperty() || Options
.EncodeClassNames()) {
8446 // Note that we do extended encoding of protocol qualifier list
8447 // Only when doing ivar or property encoding.
8449 for (const auto *I
: OPT
->quals()) {
8451 S
+= I
->getObjCRuntimeNameAsString();
8460 if (OPT
->getInterfaceDecl() &&
8461 (FD
|| Options
.EncodingProperty() || Options
.EncodeClassNames())) {
8463 S
+= OPT
->getInterfaceDecl()->getObjCRuntimeNameAsString();
8464 for (const auto *I
: OPT
->quals()) {
8466 S
+= I
->getObjCRuntimeNameAsString();
8474 // gcc just blithely ignores member pointers.
8475 // FIXME: we should do better than that. 'M' is available.
8476 case Type::MemberPointer
:
8477 // This matches gcc's encoding, even though technically it is insufficient.
8478 //FIXME. We should do a better job than gcc.
8480 case Type::ExtVector
:
8481 // Until we have a coherent encoding of these three types, issue warning.
8486 case Type::ConstantMatrix
:
8496 // We could see an undeduced auto type here during error recovery.
8499 case Type::DeducedTemplateSpecialization
:
8503 #define ABSTRACT_TYPE(KIND, BASE)
8504 #define TYPE(KIND, BASE)
8505 #define DEPENDENT_TYPE(KIND, BASE) \
8507 #define NON_CANONICAL_TYPE(KIND, BASE) \
8509 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
8511 #include "clang/AST/TypeNodes.inc"
8512 llvm_unreachable("@encode for dependent type!");
8514 llvm_unreachable("bad type kind!");
8517 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl
*RDecl
,
8519 const FieldDecl
*FD
,
8521 QualType
*NotEncodedT
) const {
8522 assert(RDecl
&& "Expected non-null RecordDecl");
8523 assert(!RDecl
->isUnion() && "Should not be called for unions");
8524 if (!RDecl
->getDefinition() || RDecl
->getDefinition()->isInvalidDecl())
8527 const auto *CXXRec
= dyn_cast
<CXXRecordDecl
>(RDecl
);
8528 std::multimap
<uint64_t, NamedDecl
*> FieldOrBaseOffsets
;
8529 const ASTRecordLayout
&layout
= getASTRecordLayout(RDecl
);
8532 for (const auto &BI
: CXXRec
->bases()) {
8533 if (!BI
.isVirtual()) {
8534 CXXRecordDecl
*base
= BI
.getType()->getAsCXXRecordDecl();
8535 if (base
->isEmpty())
8537 uint64_t offs
= toBits(layout
.getBaseClassOffset(base
));
8538 FieldOrBaseOffsets
.insert(FieldOrBaseOffsets
.upper_bound(offs
),
8539 std::make_pair(offs
, base
));
8544 for (FieldDecl
*Field
: RDecl
->fields()) {
8545 if (!Field
->isZeroLengthBitField(*this) && Field
->isZeroSize(*this))
8547 uint64_t offs
= layout
.getFieldOffset(Field
->getFieldIndex());
8548 FieldOrBaseOffsets
.insert(FieldOrBaseOffsets
.upper_bound(offs
),
8549 std::make_pair(offs
, Field
));
8552 if (CXXRec
&& includeVBases
) {
8553 for (const auto &BI
: CXXRec
->vbases()) {
8554 CXXRecordDecl
*base
= BI
.getType()->getAsCXXRecordDecl();
8555 if (base
->isEmpty())
8557 uint64_t offs
= toBits(layout
.getVBaseClassOffset(base
));
8558 if (offs
>= uint64_t(toBits(layout
.getNonVirtualSize())) &&
8559 FieldOrBaseOffsets
.find(offs
) == FieldOrBaseOffsets
.end())
8560 FieldOrBaseOffsets
.insert(FieldOrBaseOffsets
.end(),
8561 std::make_pair(offs
, base
));
8567 size
= includeVBases
? layout
.getSize() : layout
.getNonVirtualSize();
8569 size
= layout
.getSize();
8573 uint64_t CurOffs
= 0;
8575 std::multimap
<uint64_t, NamedDecl
*>::iterator
8576 CurLayObj
= FieldOrBaseOffsets
.begin();
8578 if (CXXRec
&& CXXRec
->isDynamicClass() &&
8579 (CurLayObj
== FieldOrBaseOffsets
.end() || CurLayObj
->first
!= 0)) {
8582 std::string recname
= CXXRec
->getNameAsString();
8583 if (recname
.empty()) recname
= "?";
8589 CurOffs
+= getTypeSize(VoidPtrTy
);
8593 if (!RDecl
->hasFlexibleArrayMember()) {
8594 // Mark the end of the structure.
8595 uint64_t offs
= toBits(size
);
8596 FieldOrBaseOffsets
.insert(FieldOrBaseOffsets
.upper_bound(offs
),
8597 std::make_pair(offs
, nullptr));
8600 for (; CurLayObj
!= FieldOrBaseOffsets
.end(); ++CurLayObj
) {
8602 assert(CurOffs
<= CurLayObj
->first
);
8603 if (CurOffs
< CurLayObj
->first
) {
8604 uint64_t padding
= CurLayObj
->first
- CurOffs
;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different than normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then, though.
8616 NamedDecl
*dcl
= CurLayObj
->second
;
8618 break; // reached end of structure.
8620 if (auto *base
= dyn_cast
<CXXRecordDecl
>(dcl
)) {
8621 // We expand the bases without their virtual bases since those are going
8622 // in the initial structure. Note that this differs from gcc which
8623 // expands virtual bases each time one is encountered in the hierarchy,
8624 // making the encoding type bigger than it really is.
8625 getObjCEncodingForStructureImpl(base
, S
, FD
, /*includeVBases*/false,
8627 assert(!base
->isEmpty());
8629 CurOffs
+= toBits(getASTRecordLayout(base
).getNonVirtualSize());
8632 const auto *field
= cast
<FieldDecl
>(dcl
);
8635 S
+= field
->getNameAsString();
8639 if (field
->isBitField()) {
8640 EncodeBitField(this, S
, field
->getType(), field
);
8642 CurOffs
+= field
->getBitWidthValue(*this);
8645 QualType qt
= field
->getType();
8646 getLegacyIntegralTypeEncoding(qt
);
8647 getObjCEncodingForTypeImpl(
8648 qt
, S
, ObjCEncOptions().setExpandStructures().setIsStructField(),
8651 CurOffs
+= getTypeSize(field
->getType());
void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
                                                 std::string& S) const {
  if (QT & Decl::OBJC_TQ_In)
    S += 'n';
  if (QT & Decl::OBJC_TQ_Inout)
    S += 'N';
  if (QT & Decl::OBJC_TQ_Out)
    S += 'o';
  if (QT & Decl::OBJC_TQ_Bycopy)
    S += 'O';
  if (QT & Decl::OBJC_TQ_Byref)
    S += 'R';
  if (QT & Decl::OBJC_TQ_Oneway)
    S += 'V';
}
TypedefDecl *ASTContext::getObjCIdDecl() const {
  if (!ObjCIdDecl) {
    QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {});
    T = getObjCObjectPointerType(T);
    ObjCIdDecl = buildImplicitTypedef(T, "id");
  }
  return ObjCIdDecl;
}

TypedefDecl *ASTContext::getObjCSelDecl() const {
  if (!ObjCSelDecl) {
    QualType T = getPointerType(ObjCBuiltinSelTy);
    ObjCSelDecl = buildImplicitTypedef(T, "SEL");
  }
  return ObjCSelDecl;
}

TypedefDecl *ASTContext::getObjCClassDecl() const {
  if (!ObjCClassDecl) {
    QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {});
    T = getObjCObjectPointerType(T);
    ObjCClassDecl = buildImplicitTypedef(T, "Class");
  }
  return ObjCClassDecl;
}

ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
  if (!ObjCProtocolClassDecl) {
    ObjCProtocolClassDecl
      = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(),
                                  SourceLocation(),
                                  &Idents.get("Protocol"),
                                  /*typeParamList=*/nullptr,
                                  /*PrevDecl=*/nullptr,
                                  SourceLocation(), true);
  }

  return ObjCProtocolClassDecl;
}
8714 //===----------------------------------------------------------------------===//
8715 // __builtin_va_list Construction Functions
8716 //===----------------------------------------------------------------------===//
8718 static TypedefDecl
*CreateCharPtrNamedVaListDecl(const ASTContext
*Context
,
8720 // typedef char* __builtin[_ms]_va_list;
8721 QualType T
= Context
->getPointerType(Context
->CharTy
);
8722 return Context
->buildImplicitTypedef(T
, Name
);
8725 static TypedefDecl
*CreateMSVaListDecl(const ASTContext
*Context
) {
8726 return CreateCharPtrNamedVaListDecl(Context
, "__builtin_ms_va_list");
8729 static TypedefDecl
*CreateCharPtrBuiltinVaListDecl(const ASTContext
*Context
) {
8730 return CreateCharPtrNamedVaListDecl(Context
, "__builtin_va_list");
8733 static TypedefDecl
*CreateVoidPtrBuiltinVaListDecl(const ASTContext
*Context
) {
8734 // typedef void* __builtin_va_list;
8735 QualType T
= Context
->getPointerType(Context
->VoidTy
);
8736 return Context
->buildImplicitTypedef(T
, "__builtin_va_list");
8739 static TypedefDecl
*
8740 CreateAArch64ABIBuiltinVaListDecl(const ASTContext
*Context
) {
8742 RecordDecl
*VaListTagDecl
= Context
->buildImplicitRecord("__va_list");
8743 if (Context
->getLangOpts().CPlusPlus
) {
8744 // namespace std { struct __va_list {
8745 auto *NS
= NamespaceDecl::Create(
8746 const_cast<ASTContext
&>(*Context
), Context
->getTranslationUnitDecl(),
8747 /*Inline=*/false, SourceLocation(), SourceLocation(),
8748 &Context
->Idents
.get("std"),
8749 /*PrevDecl=*/nullptr, /*Nested=*/false);
8751 VaListTagDecl
->setDeclContext(NS
);
8754 VaListTagDecl
->startDefinition();
8756 const size_t NumFields
= 5;
8757 QualType FieldTypes
[NumFields
];
8758 const char *FieldNames
[NumFields
];
8761 FieldTypes
[0] = Context
->getPointerType(Context
->VoidTy
);
8762 FieldNames
[0] = "__stack";
8765 FieldTypes
[1] = Context
->getPointerType(Context
->VoidTy
);
8766 FieldNames
[1] = "__gr_top";
8769 FieldTypes
[2] = Context
->getPointerType(Context
->VoidTy
);
8770 FieldNames
[2] = "__vr_top";
8773 FieldTypes
[3] = Context
->IntTy
;
8774 FieldNames
[3] = "__gr_offs";
8777 FieldTypes
[4] = Context
->IntTy
;
8778 FieldNames
[4] = "__vr_offs";
8781 for (unsigned i
= 0; i
< NumFields
; ++i
) {
8782 FieldDecl
*Field
= FieldDecl::Create(const_cast<ASTContext
&>(*Context
),
8786 &Context
->Idents
.get(FieldNames
[i
]),
8787 FieldTypes
[i
], /*TInfo=*/nullptr,
8788 /*BitWidth=*/nullptr,
8791 Field
->setAccess(AS_public
);
8792 VaListTagDecl
->addDecl(Field
);
8794 VaListTagDecl
->completeDefinition();
8795 Context
->VaListTagDecl
= VaListTagDecl
;
8796 QualType VaListTagType
= Context
->getRecordType(VaListTagDecl
);
8798 // } __builtin_va_list;
8799 return Context
->buildImplicitTypedef(VaListTagType
, "__builtin_va_list");
8802 static TypedefDecl
*CreatePowerABIBuiltinVaListDecl(const ASTContext
*Context
) {
8803 // typedef struct __va_list_tag {
8804 RecordDecl
*VaListTagDecl
;
8806 VaListTagDecl
= Context
->buildImplicitRecord("__va_list_tag");
8807 VaListTagDecl
->startDefinition();
8809 const size_t NumFields
= 5;
8810 QualType FieldTypes
[NumFields
];
8811 const char *FieldNames
[NumFields
];
8813 // unsigned char gpr;
8814 FieldTypes
[0] = Context
->UnsignedCharTy
;
8815 FieldNames
[0] = "gpr";
8817 // unsigned char fpr;
8818 FieldTypes
[1] = Context
->UnsignedCharTy
;
8819 FieldNames
[1] = "fpr";
8821 // unsigned short reserved;
8822 FieldTypes
[2] = Context
->UnsignedShortTy
;
8823 FieldNames
[2] = "reserved";
8825 // void* overflow_arg_area;
8826 FieldTypes
[3] = Context
->getPointerType(Context
->VoidTy
);
8827 FieldNames
[3] = "overflow_arg_area";
8829 // void* reg_save_area;
8830 FieldTypes
[4] = Context
->getPointerType(Context
->VoidTy
);
8831 FieldNames
[4] = "reg_save_area";
8834 for (unsigned i
= 0; i
< NumFields
; ++i
) {
8835 FieldDecl
*Field
= FieldDecl::Create(*Context
, VaListTagDecl
,
8838 &Context
->Idents
.get(FieldNames
[i
]),
8839 FieldTypes
[i
], /*TInfo=*/nullptr,
8840 /*BitWidth=*/nullptr,
8843 Field
->setAccess(AS_public
);
8844 VaListTagDecl
->addDecl(Field
);
8846 VaListTagDecl
->completeDefinition();
8847 Context
->VaListTagDecl
= VaListTagDecl
;
8848 QualType VaListTagType
= Context
->getRecordType(VaListTagDecl
);
8851 TypedefDecl
*VaListTagTypedefDecl
=
8852 Context
->buildImplicitTypedef(VaListTagType
, "__va_list_tag");
8854 QualType VaListTagTypedefType
=
8855 Context
->getTypedefType(VaListTagTypedefDecl
);
8857 // typedef __va_list_tag __builtin_va_list[1];
8858 llvm::APInt
Size(Context
->getTypeSize(Context
->getSizeType()), 1);
8859 QualType VaListTagArrayType
= Context
->getConstantArrayType(
8860 VaListTagTypedefType
, Size
, nullptr, ArraySizeModifier::Normal
, 0);
8861 return Context
->buildImplicitTypedef(VaListTagArrayType
, "__builtin_va_list");
8864 static TypedefDecl
*
8865 CreateX86_64ABIBuiltinVaListDecl(const ASTContext
*Context
) {
8866 // struct __va_list_tag {
8867 RecordDecl
*VaListTagDecl
;
8868 VaListTagDecl
= Context
->buildImplicitRecord("__va_list_tag");
8869 VaListTagDecl
->startDefinition();
8871 const size_t NumFields
= 4;
8872 QualType FieldTypes
[NumFields
];
8873 const char *FieldNames
[NumFields
];
8875 // unsigned gp_offset;
8876 FieldTypes
[0] = Context
->UnsignedIntTy
;
8877 FieldNames
[0] = "gp_offset";
8879 // unsigned fp_offset;
8880 FieldTypes
[1] = Context
->UnsignedIntTy
;
8881 FieldNames
[1] = "fp_offset";
8883 // void* overflow_arg_area;
8884 FieldTypes
[2] = Context
->getPointerType(Context
->VoidTy
);
8885 FieldNames
[2] = "overflow_arg_area";
8887 // void* reg_save_area;
8888 FieldTypes
[3] = Context
->getPointerType(Context
->VoidTy
);
8889 FieldNames
[3] = "reg_save_area";
8892 for (unsigned i
= 0; i
< NumFields
; ++i
) {
8893 FieldDecl
*Field
= FieldDecl::Create(const_cast<ASTContext
&>(*Context
),
8897 &Context
->Idents
.get(FieldNames
[i
]),
8898 FieldTypes
[i
], /*TInfo=*/nullptr,
8899 /*BitWidth=*/nullptr,
8902 Field
->setAccess(AS_public
);
8903 VaListTagDecl
->addDecl(Field
);
8905 VaListTagDecl
->completeDefinition();
8906 Context
->VaListTagDecl
= VaListTagDecl
;
8907 QualType VaListTagType
= Context
->getRecordType(VaListTagDecl
);
8911 // typedef struct __va_list_tag __builtin_va_list[1];
8912 llvm::APInt
Size(Context
->getTypeSize(Context
->getSizeType()), 1);
8913 QualType VaListTagArrayType
= Context
->getConstantArrayType(
8914 VaListTagType
, Size
, nullptr, ArraySizeModifier::Normal
, 0);
8915 return Context
->buildImplicitTypedef(VaListTagArrayType
, "__builtin_va_list");
8918 static TypedefDecl
*CreatePNaClABIBuiltinVaListDecl(const ASTContext
*Context
) {
8919 // typedef int __builtin_va_list[4];
8920 llvm::APInt
Size(Context
->getTypeSize(Context
->getSizeType()), 4);
8921 QualType IntArrayType
= Context
->getConstantArrayType(
8922 Context
->IntTy
, Size
, nullptr, ArraySizeModifier::Normal
, 0);
8923 return Context
->buildImplicitTypedef(IntArrayType
, "__builtin_va_list");
8926 static TypedefDecl
*
8927 CreateAAPCSABIBuiltinVaListDecl(const ASTContext
*Context
) {
8929 RecordDecl
*VaListDecl
= Context
->buildImplicitRecord("__va_list");
8930 if (Context
->getLangOpts().CPlusPlus
) {
8931 // namespace std { struct __va_list {
8933 NS
= NamespaceDecl::Create(const_cast<ASTContext
&>(*Context
),
8934 Context
->getTranslationUnitDecl(),
8935 /*Inline=*/false, SourceLocation(),
8936 SourceLocation(), &Context
->Idents
.get("std"),
8937 /*PrevDecl=*/nullptr, /*Nested=*/false);
8939 VaListDecl
->setDeclContext(NS
);
8942 VaListDecl
->startDefinition();
8945 FieldDecl
*Field
= FieldDecl::Create(const_cast<ASTContext
&>(*Context
),
8949 &Context
->Idents
.get("__ap"),
8950 Context
->getPointerType(Context
->VoidTy
),
8952 /*BitWidth=*/nullptr,
8955 Field
->setAccess(AS_public
);
8956 VaListDecl
->addDecl(Field
);
8959 VaListDecl
->completeDefinition();
8960 Context
->VaListTagDecl
= VaListDecl
;
8962 // typedef struct __va_list __builtin_va_list;
8963 QualType T
= Context
->getRecordType(VaListDecl
);
8964 return Context
->buildImplicitTypedef(T
, "__builtin_va_list");
8967 static TypedefDecl
*
8968 CreateSystemZBuiltinVaListDecl(const ASTContext
*Context
) {
8969 // struct __va_list_tag {
8970 RecordDecl
*VaListTagDecl
;
8971 VaListTagDecl
= Context
->buildImplicitRecord("__va_list_tag");
8972 VaListTagDecl
->startDefinition();
8974 const size_t NumFields
= 4;
8975 QualType FieldTypes
[NumFields
];
8976 const char *FieldNames
[NumFields
];
8979 FieldTypes
[0] = Context
->LongTy
;
8980 FieldNames
[0] = "__gpr";
8983 FieldTypes
[1] = Context
->LongTy
;
8984 FieldNames
[1] = "__fpr";
8986 // void *__overflow_arg_area;
8987 FieldTypes
[2] = Context
->getPointerType(Context
->VoidTy
);
8988 FieldNames
[2] = "__overflow_arg_area";
8990 // void *__reg_save_area;
8991 FieldTypes
[3] = Context
->getPointerType(Context
->VoidTy
);
8992 FieldNames
[3] = "__reg_save_area";
8995 for (unsigned i
= 0; i
< NumFields
; ++i
) {
8996 FieldDecl
*Field
= FieldDecl::Create(const_cast<ASTContext
&>(*Context
),
9000 &Context
->Idents
.get(FieldNames
[i
]),
9001 FieldTypes
[i
], /*TInfo=*/nullptr,
9002 /*BitWidth=*/nullptr,
9005 Field
->setAccess(AS_public
);
9006 VaListTagDecl
->addDecl(Field
);
9008 VaListTagDecl
->completeDefinition();
9009 Context
->VaListTagDecl
= VaListTagDecl
;
9010 QualType VaListTagType
= Context
->getRecordType(VaListTagDecl
);
9014 // typedef __va_list_tag __builtin_va_list[1];
9015 llvm::APInt
Size(Context
->getTypeSize(Context
->getSizeType()), 1);
9016 QualType VaListTagArrayType
= Context
->getConstantArrayType(
9017 VaListTagType
, Size
, nullptr, ArraySizeModifier::Normal
, 0);
9019 return Context
->buildImplicitTypedef(VaListTagArrayType
, "__builtin_va_list");
9022 static TypedefDecl
*CreateHexagonBuiltinVaListDecl(const ASTContext
*Context
) {
9023 // typedef struct __va_list_tag {
9024 RecordDecl
*VaListTagDecl
;
9025 VaListTagDecl
= Context
->buildImplicitRecord("__va_list_tag");
9026 VaListTagDecl
->startDefinition();
9028 const size_t NumFields
= 3;
9029 QualType FieldTypes
[NumFields
];
9030 const char *FieldNames
[NumFields
];
9032 // void *CurrentSavedRegisterArea;
9033 FieldTypes
[0] = Context
->getPointerType(Context
->VoidTy
);
9034 FieldNames
[0] = "__current_saved_reg_area_pointer";
9036 // void *SavedRegAreaEnd;
9037 FieldTypes
[1] = Context
->getPointerType(Context
->VoidTy
);
9038 FieldNames
[1] = "__saved_reg_area_end_pointer";
9040 // void *OverflowArea;
9041 FieldTypes
[2] = Context
->getPointerType(Context
->VoidTy
);
9042 FieldNames
[2] = "__overflow_area_pointer";
9045 for (unsigned i
= 0; i
< NumFields
; ++i
) {
9046 FieldDecl
*Field
= FieldDecl::Create(
9047 const_cast<ASTContext
&>(*Context
), VaListTagDecl
, SourceLocation(),
9048 SourceLocation(), &Context
->Idents
.get(FieldNames
[i
]), FieldTypes
[i
],
9050 /*BitWidth=*/nullptr,
9051 /*Mutable=*/false, ICIS_NoInit
);
9052 Field
->setAccess(AS_public
);
9053 VaListTagDecl
->addDecl(Field
);
9055 VaListTagDecl
->completeDefinition();
9056 Context
->VaListTagDecl
= VaListTagDecl
;
9057 QualType VaListTagType
= Context
->getRecordType(VaListTagDecl
);
9060 TypedefDecl
*VaListTagTypedefDecl
=
9061 Context
->buildImplicitTypedef(VaListTagType
, "__va_list_tag");
9063 QualType VaListTagTypedefType
= Context
->getTypedefType(VaListTagTypedefDecl
);
9065 // typedef __va_list_tag __builtin_va_list[1];
9066 llvm::APInt
Size(Context
->getTypeSize(Context
->getSizeType()), 1);
9067 QualType VaListTagArrayType
= Context
->getConstantArrayType(
9068 VaListTagTypedefType
, Size
, nullptr, ArraySizeModifier::Normal
, 0);
9070 return Context
->buildImplicitTypedef(VaListTagArrayType
, "__builtin_va_list");
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
                                     TargetInfo::BuiltinVaListKind Kind) {
  switch (Kind) {
  case TargetInfo::CharPtrBuiltinVaList:
    return CreateCharPtrBuiltinVaListDecl(Context);
  case TargetInfo::VoidPtrBuiltinVaList:
    return CreateVoidPtrBuiltinVaListDecl(Context);
  case TargetInfo::AArch64ABIBuiltinVaList:
    return CreateAArch64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PowerABIBuiltinVaList:
    return CreatePowerABIBuiltinVaListDecl(Context);
  case TargetInfo::X86_64ABIBuiltinVaList:
    return CreateX86_64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PNaClABIBuiltinVaList:
    return CreatePNaClABIBuiltinVaListDecl(Context);
  case TargetInfo::AAPCSABIBuiltinVaList:
    return CreateAAPCSABIBuiltinVaListDecl(Context);
  case TargetInfo::SystemZBuiltinVaList:
    return CreateSystemZBuiltinVaListDecl(Context);
  case TargetInfo::HexagonBuiltinVaList:
    return CreateHexagonBuiltinVaListDecl(Context);
  }

  llvm_unreachable("Unhandled __builtin_va_list type kind");
}
TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
  if (!BuiltinVaListDecl) {
    BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind());
    assert(BuiltinVaListDecl->isImplicit());
  }

  return BuiltinVaListDecl;
}

Decl *ASTContext::getVaListTagDecl() const {
  // Force the creation of VaListTagDecl by building the __builtin_va_list
  // declaration.
  if (!VaListTagDecl)
    (void)getBuiltinVaListDecl();

  return VaListTagDecl;
}

TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
  if (!BuiltinMSVaListDecl)
    BuiltinMSVaListDecl = CreateMSVaListDecl(this);

  return BuiltinMSVaListDecl;
}
9124 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl
*FD
) const {
9125 // Allow redecl custom type checking builtin for HLSL.
9126 if (LangOpts
.HLSL
&& FD
->getBuiltinID() != Builtin::NotBuiltin
&&
9127 BuiltinInfo
.hasCustomTypechecking(FD
->getBuiltinID()))
9129 return BuiltinInfo
.canBeRedeclared(FD
->getBuiltinID());
9132 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl
*Decl
) {
9133 assert(ObjCConstantStringType
.isNull() &&
9134 "'NSConstantString' type already set!");
9136 ObjCConstantStringType
= getObjCInterfaceType(Decl
);
9139 /// Retrieve the template name that corresponds to a non-empty
9142 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin
,
9143 UnresolvedSetIterator End
) const {
9144 unsigned size
= End
- Begin
;
9145 assert(size
> 1 && "set is not overloaded!");
9147 void *memory
= Allocate(sizeof(OverloadedTemplateStorage
) +
9148 size
* sizeof(FunctionTemplateDecl
*));
9149 auto *OT
= new (memory
) OverloadedTemplateStorage(size
);
9151 NamedDecl
**Storage
= OT
->getStorage();
9152 for (UnresolvedSetIterator I
= Begin
; I
!= End
; ++I
) {
9154 assert(isa
<FunctionTemplateDecl
>(D
) ||
9155 isa
<UnresolvedUsingValueDecl
>(D
) ||
9156 (isa
<UsingShadowDecl
>(D
) &&
9157 isa
<FunctionTemplateDecl
>(D
->getUnderlyingDecl())));
9161 return TemplateName(OT
);
9164 /// Retrieve a template name representing an unqualified-id that has been
9165 /// assumed to name a template for ADL purposes.
9166 TemplateName
ASTContext::getAssumedTemplateName(DeclarationName Name
) const {
9167 auto *OT
= new (*this) AssumedTemplateStorage(Name
);
9168 return TemplateName(OT
);
9171 /// Retrieve the template name that represents a qualified
9172 /// template name such as \c std::vector.
9173 TemplateName
ASTContext::getQualifiedTemplateName(NestedNameSpecifier
*NNS
,
9174 bool TemplateKeyword
,
9175 TemplateName Template
) const {
9176 assert(NNS
&& "Missing nested-name-specifier in qualified template name");
9178 // FIXME: Canonicalization?
9179 llvm::FoldingSetNodeID ID
;
9180 QualifiedTemplateName::Profile(ID
, NNS
, TemplateKeyword
, Template
);
9182 void *InsertPos
= nullptr;
9183 QualifiedTemplateName
*QTN
=
9184 QualifiedTemplateNames
.FindNodeOrInsertPos(ID
, InsertPos
);
9186 QTN
= new (*this, alignof(QualifiedTemplateName
))
9187 QualifiedTemplateName(NNS
, TemplateKeyword
, Template
);
9188 QualifiedTemplateNames
.InsertNode(QTN
, InsertPos
);
9191 return TemplateName(QTN
);
9194 /// Retrieve the template name that represents a dependent
9195 /// template name such as \c MetaFun::template apply.
9197 ASTContext::getDependentTemplateName(NestedNameSpecifier
*NNS
,
9198 const IdentifierInfo
*Name
) const {
9199 assert((!NNS
|| NNS
->isDependent()) &&
9200 "Nested name specifier must be dependent");
9202 llvm::FoldingSetNodeID ID
;
9203 DependentTemplateName::Profile(ID
, NNS
, Name
);
9205 void *InsertPos
= nullptr;
9206 DependentTemplateName
*QTN
=
9207 DependentTemplateNames
.FindNodeOrInsertPos(ID
, InsertPos
);
9210 return TemplateName(QTN
);
9212 NestedNameSpecifier
*CanonNNS
= getCanonicalNestedNameSpecifier(NNS
);
9213 if (CanonNNS
== NNS
) {
9214 QTN
= new (*this, alignof(DependentTemplateName
))
9215 DependentTemplateName(NNS
, Name
);
9217 TemplateName Canon
= getDependentTemplateName(CanonNNS
, Name
);
9218 QTN
= new (*this, alignof(DependentTemplateName
))
9219 DependentTemplateName(NNS
, Name
, Canon
);
9220 DependentTemplateName
*CheckQTN
=
9221 DependentTemplateNames
.FindNodeOrInsertPos(ID
, InsertPos
);
9222 assert(!CheckQTN
&& "Dependent type name canonicalization broken");
9226 DependentTemplateNames
.InsertNode(QTN
, InsertPos
);
9227 return TemplateName(QTN
);
9230 /// Retrieve the template name that represents a dependent
9231 /// template name such as \c MetaFun::template operator+.
9233 ASTContext::getDependentTemplateName(NestedNameSpecifier
*NNS
,
9234 OverloadedOperatorKind Operator
) const {
9235 assert((!NNS
|| NNS
->isDependent()) &&
9236 "Nested name specifier must be dependent");
9238 llvm::FoldingSetNodeID ID
;
9239 DependentTemplateName::Profile(ID
, NNS
, Operator
);
9241 void *InsertPos
= nullptr;
9242 DependentTemplateName
*QTN
9243 = DependentTemplateNames
.FindNodeOrInsertPos(ID
, InsertPos
);
9246 return TemplateName(QTN
);
9248 NestedNameSpecifier
*CanonNNS
= getCanonicalNestedNameSpecifier(NNS
);
9249 if (CanonNNS
== NNS
) {
9250 QTN
= new (*this, alignof(DependentTemplateName
))
9251 DependentTemplateName(NNS
, Operator
);
9253 TemplateName Canon
= getDependentTemplateName(CanonNNS
, Operator
);
9254 QTN
= new (*this, alignof(DependentTemplateName
))
9255 DependentTemplateName(NNS
, Operator
, Canon
);
9257 DependentTemplateName
*CheckQTN
9258 = DependentTemplateNames
.FindNodeOrInsertPos(ID
, InsertPos
);
9259 assert(!CheckQTN
&& "Dependent template name canonicalization broken");
9263 DependentTemplateNames
.InsertNode(QTN
, InsertPos
);
9264 return TemplateName(QTN
);
9267 TemplateName
ASTContext::getSubstTemplateTemplateParm(
9268 TemplateName Replacement
, Decl
*AssociatedDecl
, unsigned Index
,
9269 std::optional
<unsigned> PackIndex
) const {
9270 llvm::FoldingSetNodeID ID
;
9271 SubstTemplateTemplateParmStorage::Profile(ID
, Replacement
, AssociatedDecl
,
9274 void *insertPos
= nullptr;
9275 SubstTemplateTemplateParmStorage
*subst
9276 = SubstTemplateTemplateParms
.FindNodeOrInsertPos(ID
, insertPos
);
9279 subst
= new (*this) SubstTemplateTemplateParmStorage(
9280 Replacement
, AssociatedDecl
, Index
, PackIndex
);
9281 SubstTemplateTemplateParms
.InsertNode(subst
, insertPos
);
9284 return TemplateName(subst
);
9288 ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument
&ArgPack
,
9289 Decl
*AssociatedDecl
,
9290 unsigned Index
, bool Final
) const {
9291 auto &Self
= const_cast<ASTContext
&>(*this);
9292 llvm::FoldingSetNodeID ID
;
9293 SubstTemplateTemplateParmPackStorage::Profile(ID
, Self
, ArgPack
,
9294 AssociatedDecl
, Index
, Final
);
9296 void *InsertPos
= nullptr;
9297 SubstTemplateTemplateParmPackStorage
*Subst
9298 = SubstTemplateTemplateParmPacks
.FindNodeOrInsertPos(ID
, InsertPos
);
9301 Subst
= new (*this) SubstTemplateTemplateParmPackStorage(
9302 ArgPack
.pack_elements(), AssociatedDecl
, Index
, Final
);
9303 SubstTemplateTemplateParmPacks
.InsertNode(Subst
, InsertPos
);
9306 return TemplateName(Subst
);
/// getFromTargetType - Given one of the integer types provided by
/// TargetInfo, produce the corresponding type. The unsigned @p Type
/// is actually a value of type @c TargetInfo::IntType.
CanQualType ASTContext::getFromTargetType(unsigned Type) const {
  switch (Type) {
  case TargetInfo::NoInt: return {};
  case TargetInfo::SignedChar: return SignedCharTy;
  case TargetInfo::UnsignedChar: return UnsignedCharTy;
  case TargetInfo::SignedShort: return ShortTy;
  case TargetInfo::UnsignedShort: return UnsignedShortTy;
  case TargetInfo::SignedInt: return IntTy;
  case TargetInfo::UnsignedInt: return UnsignedIntTy;
  case TargetInfo::SignedLong: return LongTy;
  case TargetInfo::UnsignedLong: return UnsignedLongTy;
  case TargetInfo::SignedLongLong: return LongLongTy;
  case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
  }

  llvm_unreachable("Unhandled TargetInfo::IntType value");
}
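
// Example: on a typical LP64 target where TargetInfo reports size_t as
// UnsignedLong, getFromTargetType(TargetInfo::UnsignedLong) yields
// UnsignedLongTy; this is how callers such as getSizeType() map a target
// integer kind to an AST-level type.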
9330 //===----------------------------------------------------------------------===//
9332 //===----------------------------------------------------------------------===//
9334 /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
9335 /// garbage collection attribute.
9337 Qualifiers::GC
ASTContext::getObjCGCAttrKind(QualType Ty
) const {
9338 if (getLangOpts().getGC() == LangOptions::NonGC
)
9339 return Qualifiers::GCNone
;
9341 assert(getLangOpts().ObjC
);
9342 Qualifiers::GC GCAttrs
= Ty
.getObjCGCAttr();
9344 // Default behaviour under objective-C's gc is for ObjC pointers
9345 // (or pointers to them) be treated as though they were declared
9347 if (GCAttrs
== Qualifiers::GCNone
) {
9348 if (Ty
->isObjCObjectPointerType() || Ty
->isBlockPointerType())
9349 return Qualifiers::Strong
;
9350 else if (Ty
->isPointerType())
9351 return getObjCGCAttrKind(Ty
->castAs
<PointerType
>()->getPointeeType());
9353 // It's not valid to set GC attributes on anything that isn't a
9356 QualType CT
= Ty
->getCanonicalTypeInternal();
9357 while (const auto *AT
= dyn_cast
<ArrayType
>(CT
))
9358 CT
= AT
->getElementType();
9359 assert(CT
->isAnyPointerType() || CT
->isBlockPointerType());
9365 //===----------------------------------------------------------------------===//
9366 // Type Compatibility Testing
9367 //===----------------------------------------------------------------------===//
/// areCompatVectorTypes - Return true if the two specified vector types are
/// compatible.
static bool areCompatVectorTypes(const VectorType *LHS,
                                 const VectorType *RHS) {
  assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
  return LHS->getElementType() == RHS->getElementType() &&
         LHS->getNumElements() == RHS->getNumElements();
}

/// areCompatMatrixTypes - Return true if the two specified matrix types are
/// compatible.
static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
                                 const ConstantMatrixType *RHS) {
  assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
  return LHS->getElementType() == RHS->getElementType() &&
         LHS->getNumRows() == RHS->getNumRows() &&
         LHS->getNumColumns() == RHS->getNumColumns();
}
9388 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec
,
9389 QualType SecondVec
) {
9390 assert(FirstVec
->isVectorType() && "FirstVec should be a vector type");
9391 assert(SecondVec
->isVectorType() && "SecondVec should be a vector type");
9393 if (hasSameUnqualifiedType(FirstVec
, SecondVec
))
9396 // Treat Neon vector types and most AltiVec vector types as if they are the
9397 // equivalent GCC vector types.
9398 const auto *First
= FirstVec
->castAs
<VectorType
>();
9399 const auto *Second
= SecondVec
->castAs
<VectorType
>();
9400 if (First
->getNumElements() == Second
->getNumElements() &&
9401 hasSameType(First
->getElementType(), Second
->getElementType()) &&
9402 First
->getVectorKind() != VectorKind::AltiVecPixel
&&
9403 First
->getVectorKind() != VectorKind::AltiVecBool
&&
9404 Second
->getVectorKind() != VectorKind::AltiVecPixel
&&
9405 Second
->getVectorKind() != VectorKind::AltiVecBool
&&
9406 First
->getVectorKind() != VectorKind::SveFixedLengthData
&&
9407 First
->getVectorKind() != VectorKind::SveFixedLengthPredicate
&&
9408 Second
->getVectorKind() != VectorKind::SveFixedLengthData
&&
9409 Second
->getVectorKind() != VectorKind::SveFixedLengthPredicate
&&
9410 First
->getVectorKind() != VectorKind::RVVFixedLengthData
&&
9411 Second
->getVectorKind() != VectorKind::RVVFixedLengthData
)
9417 /// getSVETypeSize - Return SVE vector or predicate register size.
9418 static uint64_t getSVETypeSize(ASTContext
&Context
, const BuiltinType
*Ty
) {
9419 assert(Ty
->isSveVLSBuiltinType() && "Invalid SVE Type");
9420 if (Ty
->getKind() == BuiltinType::SveBool
||
9421 Ty
->getKind() == BuiltinType::SveCount
)
9422 return (Context
.getLangOpts().VScaleMin
* 128) / Context
.getCharWidth();
9423 return Context
.getLangOpts().VScaleMin
* 128;
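
// Worked example of the sizes computed above, assuming LangOpts.VScaleMin == 4
// (e.g. -msve-vector-bits=512): data vectors are reported as 4 * 128 = 512
// bits, while svbool_t predicates are 512 / 8 = 64 bits.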
9426 bool ASTContext::areCompatibleSveTypes(QualType FirstType
,
9427 QualType SecondType
) {
9429 ((FirstType
->isSVESizelessBuiltinType() && SecondType
->isVectorType()) ||
9430 (FirstType
->isVectorType() && SecondType
->isSVESizelessBuiltinType())) &&
9431 "Expected SVE builtin type and vector type!");
9433 auto IsValidCast
= [this](QualType FirstType
, QualType SecondType
) {
9434 if (const auto *BT
= FirstType
->getAs
<BuiltinType
>()) {
9435 if (const auto *VT
= SecondType
->getAs
<VectorType
>()) {
9436 // Predicates have the same representation as uint8 so we also have to
9437 // check the kind to make these types incompatible.
9438 if (VT
->getVectorKind() == VectorKind::SveFixedLengthPredicate
)
9439 return BT
->getKind() == BuiltinType::SveBool
;
9440 else if (VT
->getVectorKind() == VectorKind::SveFixedLengthData
)
9441 return VT
->getElementType().getCanonicalType() ==
9442 FirstType
->getSveEltType(*this);
9443 else if (VT
->getVectorKind() == VectorKind::Generic
)
9444 return getTypeSize(SecondType
) == getSVETypeSize(*this, BT
) &&
9445 hasSameType(VT
->getElementType(),
9446 getBuiltinVectorTypeInfo(BT
).ElementType
);
9452 return IsValidCast(FirstType
, SecondType
) ||
9453 IsValidCast(SecondType
, FirstType
);
9456 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType
,
9457 QualType SecondType
) {
9459 ((FirstType
->isSVESizelessBuiltinType() && SecondType
->isVectorType()) ||
9460 (FirstType
->isVectorType() && SecondType
->isSVESizelessBuiltinType())) &&
9461 "Expected SVE builtin type and vector type!");
9463 auto IsLaxCompatible
= [this](QualType FirstType
, QualType SecondType
) {
9464 const auto *BT
= FirstType
->getAs
<BuiltinType
>();
9468 const auto *VecTy
= SecondType
->getAs
<VectorType
>();
9469 if (VecTy
&& (VecTy
->getVectorKind() == VectorKind::SveFixedLengthData
||
9470 VecTy
->getVectorKind() == VectorKind::Generic
)) {
9471 const LangOptions::LaxVectorConversionKind LVCKind
=
9472 getLangOpts().getLaxVectorConversions();
      // Cannot convert between SVE predicates and SVE vectors because of
      // their different sizes.
9476 if (BT
->getKind() == BuiltinType::SveBool
&&
9477 VecTy
->getVectorKind() == VectorKind::SveFixedLengthData
)
9480 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
9481 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
9482 // converts to VLAT and VLAT implicitly converts to GNUT."
9483 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
9485 if (VecTy
->getVectorKind() == VectorKind::Generic
&&
9486 getTypeSize(SecondType
) != getSVETypeSize(*this, BT
))
9489 // If -flax-vector-conversions=all is specified, the types are
9490 // certainly compatible.
9491 if (LVCKind
== LangOptions::LaxVectorConversionKind::All
)
9494 // If -flax-vector-conversions=integer is specified, the types are
9495 // compatible if the elements are integer types.
9496 if (LVCKind
== LangOptions::LaxVectorConversionKind::Integer
)
9497 return VecTy
->getElementType().getCanonicalType()->isIntegerType() &&
9498 FirstType
->getSveEltType(*this)->isIntegerType();
9504 return IsLaxCompatible(FirstType
, SecondType
) ||
9505 IsLaxCompatible(SecondType
, FirstType
);
9508 /// getRVVTypeSize - Return RVV vector register size.
9509 static uint64_t getRVVTypeSize(ASTContext
&Context
, const BuiltinType
*Ty
) {
9510 assert(Ty
->isRVVVLSBuiltinType() && "Invalid RVV Type");
9511 auto VScale
= Context
.getTargetInfo().getVScaleRange(Context
.getLangOpts());
9515 ASTContext::BuiltinVectorTypeInfo Info
= Context
.getBuiltinVectorTypeInfo(Ty
);
9517 uint64_t EltSize
= Context
.getTypeSize(Info
.ElementType
);
9518 uint64_t MinElts
= Info
.EC
.getKnownMinValue();
9519 return VScale
->first
* MinElts
* EltSize
;
9522 bool ASTContext::areCompatibleRVVTypes(QualType FirstType
,
9523 QualType SecondType
) {
9525 ((FirstType
->isRVVSizelessBuiltinType() && SecondType
->isVectorType()) ||
9526 (FirstType
->isVectorType() && SecondType
->isRVVSizelessBuiltinType())) &&
9527 "Expected RVV builtin type and vector type!");
9529 auto IsValidCast
= [this](QualType FirstType
, QualType SecondType
) {
9530 if (const auto *BT
= FirstType
->getAs
<BuiltinType
>()) {
9531 if (const auto *VT
= SecondType
->getAs
<VectorType
>()) {
9532 if (VT
->getVectorKind() == VectorKind::RVVFixedLengthData
||
9533 VT
->getVectorKind() == VectorKind::Generic
)
9534 return FirstType
->isRVVVLSBuiltinType() &&
9535 getTypeSize(SecondType
) == getRVVTypeSize(*this, BT
) &&
9536 hasSameType(VT
->getElementType(),
9537 getBuiltinVectorTypeInfo(BT
).ElementType
);
9543 return IsValidCast(FirstType
, SecondType
) ||
9544 IsValidCast(SecondType
, FirstType
);
9547 bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType
,
9548 QualType SecondType
) {
9550 ((FirstType
->isRVVSizelessBuiltinType() && SecondType
->isVectorType()) ||
9551 (FirstType
->isVectorType() && SecondType
->isRVVSizelessBuiltinType())) &&
9552 "Expected RVV builtin type and vector type!");
9554 auto IsLaxCompatible
= [this](QualType FirstType
, QualType SecondType
) {
9555 const auto *BT
= FirstType
->getAs
<BuiltinType
>();
9559 if (!BT
->isRVVVLSBuiltinType())
9562 const auto *VecTy
= SecondType
->getAs
<VectorType
>();
9563 if (VecTy
&& VecTy
->getVectorKind() == VectorKind::Generic
) {
9564 const LangOptions::LaxVectorConversionKind LVCKind
=
9565 getLangOpts().getLaxVectorConversions();
9567 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
9568 if (getTypeSize(SecondType
) != getRVVTypeSize(*this, BT
))
9571 // If -flax-vector-conversions=all is specified, the types are
9572 // certainly compatible.
9573 if (LVCKind
== LangOptions::LaxVectorConversionKind::All
)
9576 // If -flax-vector-conversions=integer is specified, the types are
9577 // compatible if the elements are integer types.
9578 if (LVCKind
== LangOptions::LaxVectorConversionKind::Integer
)
9579 return VecTy
->getElementType().getCanonicalType()->isIntegerType() &&
9580 FirstType
->getRVVEltType(*this)->isIntegerType();
9586 return IsLaxCompatible(FirstType
, SecondType
) ||
9587 IsLaxCompatible(SecondType
, FirstType
);
9590 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty
) const {
9593 if (const AttributedType
*Attr
= dyn_cast
<AttributedType
>(Ty
)) {
9594 if (Attr
->getAttrKind() == attr::ObjCOwnership
)
9597 Ty
= Attr
->getModifiedType();
9599 // X *__strong (...)
9600 } else if (const ParenType
*Paren
= dyn_cast
<ParenType
>(Ty
)) {
9601 Ty
= Paren
->getInnerType();
9603 // We do not want to look through typedefs, typeof(expr),
9604 // typeof(type), or any other way that the type is somehow
9612 //===----------------------------------------------------------------------===//
9613 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
9614 //===----------------------------------------------------------------------===//
9616 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
9617 /// inheritance hierarchy of 'rProto'.
9619 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl
*lProto
,
9620 ObjCProtocolDecl
*rProto
) const {
9621 if (declaresSameEntity(lProto
, rProto
))
9623 for (auto *PI
: rProto
->protocols())
9624 if (ProtocolCompatibleWithProtocol(lProto
, PI
))
9629 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
9630 /// Class<pr1, ...>.
9631 bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
9632 const ObjCObjectPointerType
*lhs
, const ObjCObjectPointerType
*rhs
) {
9633 for (auto *lhsProto
: lhs
->quals()) {
9635 for (auto *rhsProto
: rhs
->quals()) {
9636 if (ProtocolCompatibleWithProtocol(lhsProto
, rhsProto
)) {
9647 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
9648 /// ObjCQualifiedIDType.
9649 bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
9650 const ObjCObjectPointerType
*lhs
, const ObjCObjectPointerType
*rhs
,
9652 // Allow id<P..> and an 'id' in all cases.
9653 if (lhs
->isObjCIdType() || rhs
->isObjCIdType())
9656 // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
9657 if (lhs
->isObjCClassType() || lhs
->isObjCQualifiedClassType() ||
9658 rhs
->isObjCClassType() || rhs
->isObjCQualifiedClassType())
9661 if (lhs
->isObjCQualifiedIdType()) {
9662 if (rhs
->qual_empty()) {
      // If the RHS is an unqualified interface pointer "NSString*",
9664 // make sure we check the class hierarchy.
9665 if (ObjCInterfaceDecl
*rhsID
= rhs
->getInterfaceDecl()) {
9666 for (auto *I
: lhs
->quals()) {
9667 // when comparing an id<P> on lhs with a static type on rhs,
9668 // see if static class implements all of id's protocols, directly or
9669 // through its super class and categories.
9670 if (!rhsID
->ClassImplementsProtocol(I
, true))
9674 // If there are no qualifiers and no interface, we have an 'id'.
9677 // Both the right and left sides have qualifiers.
9678 for (auto *lhsProto
: lhs
->quals()) {
9681 // when comparing an id<P> on lhs with a static type on rhs,
9682 // see if static class implements all of id's protocols, directly or
9683 // through its super class and categories.
9684 for (auto *rhsProto
: rhs
->quals()) {
9685 if (ProtocolCompatibleWithProtocol(lhsProto
, rhsProto
) ||
9686 (compare
&& ProtocolCompatibleWithProtocol(rhsProto
, lhsProto
))) {
9691 // If the RHS is a qualified interface pointer "NSString<P>*",
9692 // make sure we check the class hierarchy.
9693 if (ObjCInterfaceDecl
*rhsID
= rhs
->getInterfaceDecl()) {
9694 for (auto *I
: lhs
->quals()) {
9695 // when comparing an id<P> on lhs with a static type on rhs,
9696 // see if static class implements all of id's protocols, directly or
9697 // through its super class and categories.
9698 if (rhsID
->ClassImplementsProtocol(I
, true)) {
9711 assert(rhs
->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");
9713 if (lhs
->getInterfaceType()) {
9714 // If both the right and left sides have qualifiers.
9715 for (auto *lhsProto
: lhs
->quals()) {
9718 // when comparing an id<P> on rhs with a static type on lhs,
9719 // see if static class implements all of id's protocols, directly or
9720 // through its super class and categories.
9721 // First, lhs protocols in the qualifier list must be found, direct
9722 // or indirect in rhs's qualifier list or it is a mismatch.
9723 for (auto *rhsProto
: rhs
->quals()) {
9724 if (ProtocolCompatibleWithProtocol(lhsProto
, rhsProto
) ||
9725 (compare
&& ProtocolCompatibleWithProtocol(rhsProto
, lhsProto
))) {
9734 // Static class's protocols, or its super class or category protocols
9735 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch.
9736 if (ObjCInterfaceDecl
*lhsID
= lhs
->getInterfaceDecl()) {
9737 llvm::SmallPtrSet
<ObjCProtocolDecl
*, 8> LHSInheritedProtocols
;
9738 CollectInheritedProtocols(lhsID
, LHSInheritedProtocols
);
9739 // This is rather dubious but matches gcc's behavior. If lhs has
9740 // no type qualifier and its class has no static protocol(s)
      // assume that it is a mismatch.
9742 if (LHSInheritedProtocols
.empty() && lhs
->qual_empty())
9744 for (auto *lhsProto
: LHSInheritedProtocols
) {
9746 for (auto *rhsProto
: rhs
->quals()) {
9747 if (ProtocolCompatibleWithProtocol(lhsProto
, rhsProto
) ||
9748 (compare
&& ProtocolCompatibleWithProtocol(rhsProto
, lhsProto
))) {
9762 /// canAssignObjCInterfaces - Return true if the two interface types are
9763 /// compatible for assignment from RHS to LHS. This handles validation of any
9764 /// protocol qualifiers on the LHS or RHS.
9765 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType
*LHSOPT
,
9766 const ObjCObjectPointerType
*RHSOPT
) {
9767 const ObjCObjectType
* LHS
= LHSOPT
->getObjectType();
9768 const ObjCObjectType
* RHS
= RHSOPT
->getObjectType();
9770 // If either type represents the built-in 'id' type, return true.
9771 if (LHS
->isObjCUnqualifiedId() || RHS
->isObjCUnqualifiedId())
9774 // Function object that propagates a successful result or handles
9776 auto finish
= [&](bool succeeded
) -> bool {
9780 if (!RHS
->isKindOfType())
9783 // Strip off __kindof and protocol qualifiers, then check whether
9784 // we can assign the other way.
9785 return canAssignObjCInterfaces(RHSOPT
->stripObjCKindOfTypeAndQuals(*this),
9786 LHSOPT
->stripObjCKindOfTypeAndQuals(*this));
9789 // Casts from or to id<P> are allowed when the other side has compatible
9791 if (LHS
->isObjCQualifiedId() || RHS
->isObjCQualifiedId()) {
9792 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT
, RHSOPT
, false));
9795 // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
9796 if (LHS
->isObjCQualifiedClass() && RHS
->isObjCQualifiedClass()) {
9797 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT
, RHSOPT
));
9800 // Casts from Class to Class<Foo>, or vice-versa, are allowed.
9801 if (LHS
->isObjCClass() && RHS
->isObjCClass()) {
9805 // If we have 2 user-defined types, fall into that path.
9806 if (LHS
->getInterface() && RHS
->getInterface()) {
9807 return finish(canAssignObjCInterfaces(LHS
, RHS
));
9813 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
9814 /// for providing type-safety for objective-c pointers used to pass/return
9815 /// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *' where 'Super *' is expected is
9817 /// not OK. For the return type, the opposite is not OK.
9818 bool ASTContext::canAssignObjCInterfacesInBlockPointer(
9819 const ObjCObjectPointerType
*LHSOPT
,
9820 const ObjCObjectPointerType
*RHSOPT
,
9821 bool BlockReturnType
) {
9823 // Function object that propagates a successful result or handles
9825 auto finish
= [&](bool succeeded
) -> bool {
9829 const ObjCObjectPointerType
*Expected
= BlockReturnType
? RHSOPT
: LHSOPT
;
9830 if (!Expected
->isKindOfType())
9833 // Strip off __kindof and protocol qualifiers, then check whether
9834 // we can assign the other way.
9835 return canAssignObjCInterfacesInBlockPointer(
9836 RHSOPT
->stripObjCKindOfTypeAndQuals(*this),
9837 LHSOPT
->stripObjCKindOfTypeAndQuals(*this),
9841 if (RHSOPT
->isObjCBuiltinType() || LHSOPT
->isObjCIdType())
9844 if (LHSOPT
->isObjCBuiltinType()) {
9845 return finish(RHSOPT
->isObjCBuiltinType() ||
9846 RHSOPT
->isObjCQualifiedIdType());
9849 if (LHSOPT
->isObjCQualifiedIdType() || RHSOPT
->isObjCQualifiedIdType()) {
9850 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking
)
9851 // Use for block parameters previous type checking for compatibility.
9852 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT
, RHSOPT
, false) ||
9853 // Or corrected type checking as in non-compat mode.
9854 (!BlockReturnType
&&
9855 ObjCQualifiedIdTypesAreCompatible(RHSOPT
, LHSOPT
, false)));
9857 return finish(ObjCQualifiedIdTypesAreCompatible(
9858 (BlockReturnType
? LHSOPT
: RHSOPT
),
9859 (BlockReturnType
? RHSOPT
: LHSOPT
), false));
9862 const ObjCInterfaceType
* LHS
= LHSOPT
->getInterfaceType();
9863 const ObjCInterfaceType
* RHS
= RHSOPT
->getInterfaceType();
9864 if (LHS
&& RHS
) { // We have 2 user-defined types.
9866 if (LHS
->getDecl()->isSuperClassOf(RHS
->getDecl()))
9867 return finish(BlockReturnType
);
9868 if (RHS
->getDecl()->isSuperClassOf(LHS
->getDecl()))
9869 return finish(!BlockReturnType
);
/// Comparison routine for Objective-C protocols to be used with
/// llvm::array_pod_sort.
static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
                                      ObjCProtocolDecl * const *rhs) {
  return (*lhs)->getName().compare((*rhs)->getName());
}
/// getIntersectionOfProtocols - This routine finds the intersection of the
/// sets of protocols inherited from two distinct Objective-C pointer objects
/// with the given common base.
/// It is used to build the composite qualifier list of the composite type of
/// the conditional expression involving two Objective-C pointer objects.
static
void getIntersectionOfProtocols(ASTContext &Context,
                                const ObjCInterfaceDecl *CommonBase,
                                const ObjCObjectPointerType *LHSOPT,
                                const ObjCObjectPointerType *RHSOPT,
      SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {

  const ObjCObjectType *LHS = LHSOPT->getObjectType();
  const ObjCObjectType *RHS = RHSOPT->getObjectType();
  assert(LHS->getInterface() && "LHS must have an interface base");
  assert(RHS->getInterface() && "RHS must have an interface base");

  // Add all of the protocols for the LHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto *proto : LHS->quals()) {
    Context.CollectInheritedProtocols(proto, LHSProtocolSet);
  }

  // Also add the protocols associated with the LHS interface.
  Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);

  // Add all of the protocols for the RHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto *proto : RHS->quals()) {
    Context.CollectInheritedProtocols(proto, RHSProtocolSet);
  }

  // Also add the protocols associated with the RHS interface.
  Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);

  // Compute the intersection of the collected protocol sets.
  for (auto *proto : LHSProtocolSet) {
    if (RHSProtocolSet.count(proto))
      IntersectionSet.push_back(proto);
  }

  // Compute the set of protocols that is implied by either the common type or
  // the protocols within the intersection.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
  Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols);

  // Remove any implied protocols from the list of inherited protocols.
  if (!ImpliedProtocols.empty()) {
    llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool {
      return ImpliedProtocols.contains(proto);
    });
  }

  // Sort the remaining protocols by name.
  llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(),
                       compareObjCProtocolsByName);
}
/// Determine whether the first type is a subtype of the second.
static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
                                     QualType rhs) {
  // Common case: two object pointers.
  const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
  const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
  if (lhsOPT && rhsOPT)
    return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT);

  // Two block pointers.
  const auto *lhsBlock = lhs->getAs<BlockPointerType>();
  const auto *rhsBlock = rhs->getAs<BlockPointerType>();
  if (lhsBlock && rhsBlock)
    return ctx.typesAreBlockPointerCompatible(lhs, rhs);

  // If either is an unqualified 'id' and the other is a block, it's
  // acceptable.
  if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
      (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
    return true;

  return false;
}
// Check that the given Objective-C type argument lists are equivalent.
static bool sameObjCTypeArgs(ASTContext &ctx,
                             const ObjCInterfaceDecl *iface,
                             ArrayRef<QualType> lhsArgs,
                             ArrayRef<QualType> rhsArgs,
                             bool stripKindOf) {
  if (lhsArgs.size() != rhsArgs.size())
    return false;

  ObjCTypeParamList *typeParams = iface->getTypeParamList();
  if (!typeParams)
    return false;

  for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
    if (ctx.hasSameType(lhsArgs[i], rhsArgs[i]))
      continue;

    switch (typeParams->begin()[i]->getVariance()) {
    case ObjCTypeParamVariance::Invariant:
      if (!stripKindOf ||
          !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx),
                           rhsArgs[i].stripObjCKindOfType(ctx))) {
        return false;
      }
      break;

    case ObjCTypeParamVariance::Covariant:
      if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i]))
        return false;
      break;

    case ObjCTypeParamVariance::Contravariant:
      if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i]))
        return false;
      break;
    }
  }

  return true;
}
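/// Compute the composite Objective-C object pointer type of two object
/// pointers for a conditional expression: walk up the class hierarchies of
/// both operands looking for a common base, then rebuild that base with the
/// intersection of protocols and, when either side is __kindof-qualified, a
/// __kindof result. Returns a null type when no common base exists.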
QualType ASTContext::areCommonBaseCompatible(
           const ObjCObjectPointerType *Lptr,
           const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl *LDecl = LHS->getInterface();
  const ObjCInterfaceDecl *RDecl = RHS->getInterface();

  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(LHS->getInterface(), RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(LHS->getInterface());
        Result = getObjCObjectType(Result, LHSTypeArgs, Protocols,
                                   anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }

      return getObjCObjectPointerType(QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  while (true) {
    auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(RHS->getInterface());
        Result = getObjCObjectType(Result, RHSTypeArgs, Protocols,
                                   anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }

      return getObjCObjectPointerType(QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  return {};
}
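/// Determine whether an Objective-C object of type RHS can be assigned to an
/// lvalue of type LHS: the RHS class must be the same as, or a subclass of,
/// the LHS class, it must satisfy all of the LHS's protocol qualifiers, and,
/// when the LHS is specialized, its type arguments must be compatible.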
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types;
    // i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example: SuperObj<P1> = lhs<P1,P2> is OK,
    // but not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(RHS->getInterface(),
                              SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
    // If there are no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(*this, LHS->getInterface(),
                          LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}
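/// Two Objective-C object pointer types are comparable if either one is
/// assignable to the other.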
bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
  // Get the "pointed to" types.
  const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
  const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();

  if (!LHSOPT || !RHSOPT)
    return false;

  return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
         canAssignObjCInterfaces(RHSOPT, LHSOPT);
}
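/// Determine whether an Objective-C object of type From can be bound to an
/// entity of type To, by forming the corresponding object pointer types and
/// checking assignability.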
bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
  return canAssignObjCInterfaces(
      getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
      getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
}
/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
/// both shall have the identically qualified version of a compatible type.
/// C99 6.2.7p1: Two types have compatible types if their types are the
/// same. See 6.7.[2,3,5] for additional rules.
bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
                                    bool CompareUnqualified) {
  if (getLangOpts().CPlusPlus)
    return hasSameType(LHS, RHS);

  return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
}

bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}

bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
  return !mergeTypes(LHS, RHS, true).isNull();
}
/// mergeTransparentUnionType - if T is a transparent union type and a member
/// of T is compatible with SubType, return the merged type, else return
/// QualType().
QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
                                               bool OfBlockPointer,
                                               bool Unqualified) {
  if (const RecordType *UT = T->getAsUnionType()) {
    RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      for (const auto *I : UD->fields()) {
        QualType ET = I->getType().getUnqualifiedType();
        QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
        if (!MT.isNull())
          return MT;
      }
    }
  }

  return {};
}
/// mergeFunctionParameterTypes - merge two types which appear as function
/// parameter types.
QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
                                                 bool OfBlockPointer,
                                                 bool Unqualified) {
  // GNU extension: two types are compatible if they appear as a function
  // argument, one of the types is a transparent union type and the other
  // type is compatible with a union member.
  QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer,
                                              Unqualified);
  if (!lmerge.isNull())
    return lmerge;

  QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer,
                                              Unqualified);
  if (!rmerge.isNull())
    return rmerge;

  return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
}
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer, bool Unqualified,
                                        bool AllowCXX,
                                        bool IsConditionalOperator) {
  const auto *lbase = lhs->castAs<FunctionType>();
  const auto *rbase = rhs->castAs<FunctionType>();
  const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
  const auto *rproto = dyn_cast<FunctionProtoType>(rbase);
  bool allLTypes = true;
  bool allRTypes = true;

  // Check return type.
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
  } else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
                         Unqualified);
  if (retType.isNull())
    return {};

  if (Unqualified)
    retType = retType.getUnqualifiedType();

  CanQualType LRetType = getCanonicalType(lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }

  if (getCanonicalType(retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(retType) != RRetType)
    allRTypes = false;

  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();

  // Compatible functions must have compatible calling conventions.
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return {};

  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return {};
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return {};

  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return {};
  if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
    return {};
  if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
    return {};

  // When merging declarations, it's common for supplemental information like
  // attributes to only be present in one of the declarations, and we generally
  // want type merging to preserve the union of information. So a merged
  // function type should be noreturn if it was noreturn in *either* operand
  // type.
  //
  // But for the conditional operator, this is backwards. The result of the
  // operator could be either operand, and its type should conservatively
  // reflect that. So a function type in a composite type is noreturn only
  // if it's noreturn in *both* operand types.
  //
  // Arguably, noreturn is a kind of subtype, and the conditional operator
  // ought to produce the most specific common supertype of its operand types.
  // That would differ from this rule in contravariant positions. However,
  // neither C nor C++ generally uses this kind of subtype reasoning. Also,
  // as a practical matter, it would only affect C code that does abstraction of
  // higher-order functions (taking noreturn callbacks!), which is uncommon to
  // say the least. So we use the simpler rule.
  bool NoReturn = IsConditionalOperator
                      ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
                      : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;

  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);

  if (lproto && rproto) { // two C99 style function prototypes
    assert((AllowCXX ||
            (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
           "C++ shouldn't be here");
    // Compatible functions must have the same number of parameters.
    if (lproto->getNumParams() != rproto->getNumParams())
      return {};

    // Variadic and non-variadic functions aren't compatible.
    if (lproto->isVariadic() != rproto->isVariadic())
      return {};

    if (lproto->getMethodQuals() != rproto->getMethodQuals())
      return {};

    SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
    bool canUseLeft, canUseRight;
    if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight,
                               newParamInfos))
      return {};

    if (!canUseLeft)
      allLTypes = false;
    if (!canUseRight)
      allRTypes = false;

    // Check parameter type compatibility.
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lParamType, rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return {};

      if (Unqualified)
        paramType = paramType.getUnqualifiedType();

      types.push_back(paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }

      if (getCanonicalType(paramType) != getCanonicalType(lParamType))
        allLTypes = false;
      if (getCanonicalType(paramType) != getCanonicalType(rParamType))
        allRTypes = false;
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    EPI.ExtParameterInfos =
        newParamInfos.empty() ? nullptr : newParamInfos.data();
    return getFunctionType(retType, types, EPI);
  }

  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;

  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
    if (proto->isVariadic())
      return {};
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);

      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const auto *Enum = paramTy->getAs<EnumType>()) {
        paramTy = Enum->getDecl()->getIntegerType();
        if (paramTy.isNull())
          return {};
      }

      if (isPromotableIntegerType(paramTy) ||
          getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
        return {};
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    return getFunctionType(retType, proto->getParamTypes(), EPI);
  }

  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  return getFunctionNoProtoType(retType, einfo);
}
/// Given that we have an enum type and a non-enum type, try to merge them.
static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
                                     QualType other, bool isBlockReturnType) {
  // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
  // a signed integer type, or an unsigned integer type.
  // Compatibility is based on the underlying type, not the promotion
  // type.
  QualType underlyingType = ET->getDecl()->getIntegerType();
  if (underlyingType.isNull())
    return {};
  if (Context.hasSameType(underlyingType, other))
    return other;

  // In block return types, we're more permissive and accept any
  // integral type of the same size.
  if (isBlockReturnType && other->isIntegerType() &&
      Context.getTypeSize(underlyingType) == Context.getTypeSize(other))
    return other;

  return {};
}
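/// mergeTypes - Compute the composite of two types under C's compatibility
/// rules (C99 6.2.7), returning a null QualType when the types are not
/// compatible. OfBlockPointer and BlockReturnType select the more permissive
/// rules used for block pointer parameters and return types.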
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
                                bool Unqualified, bool BlockReturnType,
                                bool IsConditionalOperator) {
  // For C++ we will not reach this code with reference types (see below),
  // for OpenMP variant call overloading we might.
  //
  // C++ [expr]: If an expression initially has the type "reference to T", the
  // type is adjusted to "T" prior to any further analysis, the expression
  // designates the object or function denoted by the reference, and the
  // expression is an lvalue unless the reference is an rvalue reference and
  // the expression is a function call (possibly inside parentheses).
  auto *LHSRefTy = LHS->getAs<ReferenceType>();
  auto *RHSRefTy = RHS->getAs<ReferenceType>();
  if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
      LHS->getTypeClass() == RHS->getTypeClass())
    return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(),
                      OfBlockPointer, Unqualified, BlockReturnType);
  if (LHSRefTy || RHSRefTy)
    return {};

  if (Unqualified) {
    LHS = LHS.getUnqualifiedType();
    RHS = RHS.getUnqualifiedType();
  }

  QualType LHSCan = getCanonicalType(LHS),
           RHSCan = getCanonicalType(RHS);

  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;

  // If the qualifiers are different, the types aren't compatible... mostly.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type
    // mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
        LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
        LQuals.hasUnaligned() != RQuals.hasUnaligned())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default). We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
      return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong));
    }
    if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
      return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS);
    }
    return {};
  }

  // Okay, qualifiers are equal.

  Type::TypeClass LHSClass = LHSCan->getTypeClass();
  Type::TypeClass RHSClass = RHSCan->getTypeClass();

  // We want to consider the two function types to be the same for these
  // comparisons, just force one to the other.
  if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
  if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;

  // Same as above for arrays.
  if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
    LHSClass = Type::ConstantArray;
  if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
    RHSClass = Type::ConstantArray;

  // ObjCInterfaces are just specialized ObjCObjects.
  if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
  if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;

  // Canonicalize ExtVector -> Vector.
  if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
  if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;

  // If the canonical type classes don't match.
  if (LHSClass != RHSClass) {
    // Note that we only have special rules for turning block enum
    // returns into block int returns, not vice-versa.
    if (const auto *ETy = LHS->getAs<EnumType>()) {
      return mergeEnumWithInteger(*this, ETy, RHS, false);
    }
    if (const EnumType *ETy = RHS->getAs<EnumType>()) {
      return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType);
    }
    // Allow a block pointer type to match an 'id' type.
    if (OfBlockPointer && !BlockReturnType) {
      if (LHS->isObjCIdType() && RHS->isBlockPointerType())
        return LHS;
      if (RHS->isObjCIdType() && LHS->isBlockPointerType())
        return RHS;
    }
    // Allow __auto_type to match anything; it merges to the type with more
    // information.
    if (const auto *AT = LHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return RHS;
    }
    if (const auto *AT = RHS->getAs<AutoType>()) {
      if (!AT->isDeduced() && AT->isGNUAutoType())
        return LHS;
    }
    return {};
  }

  // The canonical type classes match.
  switch (LHSClass) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
    llvm_unreachable("C++ should never be in mergeTypes");

  case Type::ObjCInterface:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::FunctionProto:
  case Type::ExtVector:
    llvm_unreachable("Types are eliminated above");

  case Type::Pointer:
  {
    // Merge two pointer types, while trying to preserve typedef info.
    QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
      return LHS;
    if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
      return RHS;
    return getPointerType(ResultType);
  }
  case Type::BlockPointer:
  {
    // Merge two block pointer types, while trying to preserve typedef info.
    QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType();
    QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType();
    if (Unqualified) {
      LHSPointee = LHSPointee.getUnqualifiedType();
      RHSPointee = RHSPointee.getUnqualifiedType();
    }
    if (getLangOpts().OpenCL) {
      Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
      Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
      // Blocks can't be an expression in a ternary operator (OpenCL v2.0
      // 6.12.5) thus the following check is asymmetric.
      if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual))
        return {};
      LHSPteeQual.removeAddressSpace();
      RHSPteeQual.removeAddressSpace();
      LHSPointee =
          QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
      RHSPointee =
          QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
    }
    QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
      return LHS;
    if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
      return RHS;
    return getBlockPointerType(ResultType);
  }
  case Type::Atomic:
  {
    // Merge two atomic types, while trying to preserve typedef info.
    QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
    QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
    if (Unqualified) {
      LHSValue = LHSValue.getUnqualifiedType();
      RHSValue = RHSValue.getUnqualifiedType();
    }
    QualType ResultType = mergeTypes(LHSValue, RHSValue, false,
                                     Unqualified);
    if (ResultType.isNull())
      return {};
    if (getCanonicalType(LHSValue) == getCanonicalType(ResultType))
      return LHS;
    if (getCanonicalType(RHSValue) == getCanonicalType(ResultType))
      return RHS;
    return getAtomicType(ResultType);
  }
  case Type::ConstantArray:
  {
    const ConstantArrayType *LCAT = getAsConstantArrayType(LHS);
    const ConstantArrayType *RCAT = getAsConstantArrayType(RHS);
    if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
      return {};

    QualType LHSElem = getAsArrayType(LHS)->getElementType();
    QualType RHSElem = getAsArrayType(RHS)->getElementType();
    if (Unqualified) {
      LHSElem = LHSElem.getUnqualifiedType();
      RHSElem = RHSElem.getUnqualifiedType();
    }

    QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified);
    if (ResultType.isNull())
      return {};

    const VariableArrayType *LVAT = getAsVariableArrayType(LHS);
    const VariableArrayType *RVAT = getAsVariableArrayType(RHS);

    // If either side is a variable array, and both are complete, check whether
    // the current dimension is definite.
    if (LVAT || RVAT) {
      auto SizeFetch = [this](const VariableArrayType *VAT,
                              const ConstantArrayType *CAT)
          -> std::pair<bool, llvm::APInt> {
        if (VAT) {
          std::optional<llvm::APSInt> TheInt;
          Expr *E = VAT->getSizeExpr();
          if (E && (TheInt = E->getIntegerConstantExpr(*this)))
            return std::make_pair(true, *TheInt);
          return std::make_pair(false, llvm::APSInt());
        }
        if (CAT)
          return std::make_pair(true, CAT->getSize());
        return std::make_pair(false, llvm::APInt());
      };

      bool HaveLSize, HaveRSize;
      llvm::APInt LSize, RSize;
      std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT);
      std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT);
      if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize))
        return {}; // Definite, but unequal, array dimension
    }

    if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
      return LHS;
    if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
      return RHS;
    if (LCAT)
      return getConstantArrayType(ResultType, LCAT->getSize(),
                                  LCAT->getSizeExpr(), ArraySizeModifier(), 0);
    if (RCAT)
      return getConstantArrayType(ResultType, RCAT->getSize(),
                                  RCAT->getSizeExpr(), ArraySizeModifier(), 0);
    if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
      return LHS;
    if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
      return RHS;
    if (LVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of LHS, but the type
      // has to be different.
      return LHS;
    }
    if (RVAT) {
      // FIXME: This isn't correct! But tricky to implement because
      // the array's size has to be the size of RHS, but the type
      // has to be different.
      return RHS;
    }
    if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS;
    if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS;
    return getIncompleteArrayType(ResultType, ArraySizeModifier(), 0);
  }
  case Type::FunctionNoProto:
    return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified,
                              /*AllowCXX=*/false, IsConditionalOperator);
  case Type::Record:
  case Type::Enum:
    return {};
  case Type::Builtin:
    // Only exactly equal builtin types are compatible, which is tested above.
    return {};
  case Type::Complex:
    // Distinct complex types are incompatible.
    return {};
  case Type::Vector:
    // FIXME: The merged type should be an ExtVector!
    if (areCompatVectorTypes(LHSCan->castAs<VectorType>(),
                             RHSCan->castAs<VectorType>()))
      return LHS;
    return {};
  case Type::ConstantMatrix:
    if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(),
                             RHSCan->castAs<ConstantMatrixType>()))
      return LHS;
    return {};
  case Type::ObjCObject: {
    // Check if the types are assignment compatible.
    // FIXME: This should be type compatibility, e.g. whether
    // "LHS x; RHS x;" at global scope is legal.
    if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(),
                                RHS->castAs<ObjCObjectType>()))
      return LHS;
    return {};
  }
  case Type::ObjCObjectPointer:
    if (OfBlockPointer) {
      if (canAssignObjCInterfacesInBlockPointer(
              LHS->castAs<ObjCObjectPointerType>(),
              RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
        return LHS;
      return {};
    }
    if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(),
                                RHS->castAs<ObjCObjectPointerType>()))
      return LHS;
    return {};
  case Type::Pipe:
    assert(LHS != RHS &&
           "Equivalent pipe types should have already been handled!");
    return {};
  case Type::BitInt: {
    // Merge two bit-precise int types, while trying to preserve typedef info.
    bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
    bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
    unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
    unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();

    // Like unsigned/int, shouldn't have a type if they don't match.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  }

  llvm_unreachable("Invalid Type::Class!");
}
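/// Merge the extended parameter information (e.g. noescape) of two function
/// prototypes, returning false if any parameter differs in something other
/// than the noescape flag. CanUseFirst/CanUseSecond report whether either
/// original prototype already carries the merged information.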
bool ASTContext::mergeExtParameterInfo(
    const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
    bool &CanUseFirst, bool &CanUseSecond,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
  assert(NewParamInfos.empty() && "param info list not empty");
  CanUseFirst = CanUseSecond = true;
  bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
  bool SecondHasInfo = SecondFnType->hasExtParameterInfos();

  // Fast path: if the first type doesn't have ext parameter infos,
  // we match if and only if the second type also doesn't have them.
  if (!FirstHasInfo && !SecondHasInfo)
    return true;

  bool NeedParamInfo = false;
  size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
                          : SecondFnType->getExtParameterInfos().size();

  for (size_t I = 0; I < E; ++I) {
    FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
    if (FirstHasInfo)
      FirstParam = FirstFnType->getExtParameterInfo(I);
    if (SecondHasInfo)
      SecondParam = SecondFnType->getExtParameterInfo(I);

    // Cannot merge unless everything except the noescape flag matches.
    if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false))
      return false;

    bool FirstNoEscape = FirstParam.isNoEscape();
    bool SecondNoEscape = SecondParam.isNoEscape();
    bool IsNoEscape = FirstNoEscape && SecondNoEscape;
    NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape));
    if (NewParamInfos.back().getOpaqueValue())
      NeedParamInfo = true;
    if (FirstNoEscape != IsNoEscape)
      CanUseFirst = false;
    if (SecondNoEscape != IsNoEscape)
      CanUseSecond = false;
  }

  if (!NeedParamInfo)
    NewParamInfos.clear();

  return true;
}
void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) {
  ObjCLayouts[CD] = nullptr;
}
/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
/// 'RHS' attributes and returns the merged version; including for function
/// return types.
QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
  QualType LHSCan = getCanonicalType(LHS),
           RHSCan = getCanonicalType(RHS);
  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;
  if (RHSCan->isFunctionType()) {
    if (!LHSCan->isFunctionType())
      return {};
    QualType OldReturnType =
        cast<FunctionType>(RHSCan.getTypePtr())->getReturnType();
    QualType NewReturnType =
        cast<FunctionType>(LHSCan.getTypePtr())->getReturnType();
    QualType ResReturnType =
        mergeObjCGCQualifiers(NewReturnType, OldReturnType);
    if (ResReturnType.isNull())
      return {};
    if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
      // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
      // In either case, use OldReturnType to build the new function type.
      const auto *F = LHS->castAs<FunctionType>();
      if (const auto *FPT = cast<FunctionProtoType>(F)) {
        FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
        EPI.ExtInfo = getFunctionExtInfo(LHS);
        QualType ResultType =
            getFunctionType(OldReturnType, FPT->getParamTypes(), EPI);
        return ResultType;
      }
    }
    return {};
  }

  // If the qualifiers are different, the types can still be merged.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace())
      return {};

    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default). We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong)
      return LHS;
    if (GC_R == Qualifiers::Strong)
      return RHS;
    return {};
  }

  if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
    QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
    if (ResQT == LHSBaseQT)
      return LHS;
    if (ResQT == RHSBaseQT)
      return RHS;
  }
  return {};
}
//===----------------------------------------------------------------------===//
//                         Integer Predicates
//===----------------------------------------------------------------------===//
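/// Return the width of the given integer-like type in bits: 1 for bool, the
/// declared bit width for _BitInt types, and the width of the underlying
/// integer type for enumerations.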
unsigned ASTContext::getIntWidth(QualType T) const {
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType();
  if (T->isBooleanType())
    return 1;
  if (const auto *EIT = T->getAs<BitIntType>())
    return EIT->getNumBits();
  // For builtin types, just use the standard type sizing method.
  return (unsigned)getTypeSize(T);
}
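/// Return the unsigned counterpart of the given signed integer, fixed-point,
/// enumeration, vector, or _BitInt type.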
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/true, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the
  // general integer type sign-changing code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
    // Plain `char` is mapped to `unsigned char` even if it's already unsigned
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
  case BuiltinType::Char8:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    assert((T->hasUnsignedIntegerRepresentation() ||
            T->isUnsignedFixedPointType()) &&
           "Unexpected signed integer or fixed point type");
    return T;
  }
}
QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
          T->isFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x unsigned int> -> <4 x signed int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _BitInt, return a signed _BitInt with same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/false, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the
  // general integer type sign-changing code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
    // Plain `char` is mapped to `signed char` even if it's already signed
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::Char8:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
  case BuiltinType::WChar_U:
    return getSignedWCharType();

  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    assert(
        (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
        "Unexpected signed integer or fixed point type");
    return T;
  }
}
ASTMutationListener::~ASTMutationListener() = default;

void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}
//===----------------------------------------------------------------------===//
//                          Builtin Type Computation
//===----------------------------------------------------------------------===//

/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
/// pointer over the consumed characters. This returns the resultant type. If
/// AllowTypeModifiers is false then modifiers like '*' are not parsed, just
/// basic types. This allows "v2i*" to be parsed as a pointer to a v2i instead
/// of a vector of "i*".
///
/// RequiresICE is filled in on return to indicate whether the value is required
/// to be an Integer Constant Expression.
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0;
  bool Signed = false, Unsigned = false;
  RequiresICE = false;

  // Read the prefixed modifiers first.
  bool Done = false;
#ifndef NDEBUG
  bool IsSpecial = false;
#endif
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break;
    case 'I':
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'N':
      // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getTargetInfo().getLongWidth() == 32)
        ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'Z':
      // This modifier represents int32 type.
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedInt:
        HowLong = 0;
        break;
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
      break;
    case 'O':
      assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
      assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
#ifndef NDEBUG
      IsSpecial = true;
#endif
      if (Context.getLangOpts().OpenCL)
        HowLong = 1;
      else
        HowLong = 2;
      break;
    }
  }

  QualType Type;

  // Read the base type.
  switch (*Str++) {
  default: llvm_unreachable("Unknown builtin type letter!");
  case 'x':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'x'!");
    Type = Context.Float16Ty;
    break;
  case 'y':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'y'!");
    Type = Context.BFloat16Ty;
    break;
  case 'v':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f':
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd':
    assert(HowLong < 3 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong == 1)
      Type = Context.LongDoubleTy;
    else if (HowLong == 2)
      Type = Context.Float128Ty;
    else
      Type = Context.DoubleTy;
    break;
  case 's':
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c':
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'w': // wchar_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
    Type = Context.getWideCharType();
    break;
  case 'F':
    Type = Context.getCFConstantStringType();
    break;
  case 'G':
    Type = Context.getObjCIdType();
    break;
  case 'H':
    Type = Context.getObjCSelType();
    break;
  case 'M':
    Type = Context.getObjCSuperType();
    break;
  case 'a':
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Type);
    else
      Type = Context.getLValueReferenceType(Type);
    break;
  case 'q': {
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");

    Type = Context.getScalableVectorType(ElementType, NumElements);
    break;
  }
  case 'Q': {
    switch (*Str++) {
    case 'a': {
      Type = Context.SveCountTy;
      break;
    }
    default:
      llvm_unreachable("Unexpected target builtin type");
    }
    break;
  }
  case 'V': {
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");

    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic);
    break;
  }
  case 'E': {
    char *End;

    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    Type = Context.getExtVectorType(ElementType, NumElements);
    break;
  }
  case 'X': {
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(ElementType);
    break;
  }
  case 'Y':
    Type = Context.getPointerDiffType();
    break;
  case 'P':
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J':
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K':
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p':
    Type = Context.getProcessIDType();
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break;
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(Str, &End, 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            Type,
            Context.getLangASForBuiltinAddressSpace(AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(Type);
      else
        Type = Context.getLValueReferenceType(Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C':
      Type = Type.withConst();
      break;
    case 'D':
      Type = Context.getVolatileType(Type);
      break;
    case 'R':
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}
// On some targets such as PowerPC, some of the builtins are defined with custom
// type descriptors for target-dependent types. These descriptors are decoded in
// other functions, but it may be useful to be able to fall back to default
// descriptor decoding to define builtins mixing target-dependent and target-
// independent types. This function allows decoding one type descriptor with
// default decoding.
QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
                                   GetBuiltinTypeError &Error, bool &RequireICE,
                                   bool AllowTypeModifiers) const {
  return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers);
}
/// GetBuiltinType - Return the type for the specified builtin.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.getTypeString(Id);
  if (TypeStr[0] == '\0') {
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
                                       RequiresICE, true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");

  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Ty);
  }

  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");

  bool Variadic = (TypeStr[0] == '.');

  FunctionType::ExtInfo EI(getDefaultCallingConvention(
      Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
  if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);

  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
    return getFunctionNoProtoType(ResType, EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResType, ArgTypes, EPI);
}
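
// Minimal caller sketch for the ICE bitmask above (assumes an ASTContext
// `Ctx` and a builtin ID `BI` are in scope; not part of this file): bit N of
// the returned mask is set when argument N must be an integer constant
// expression.
//   ASTContext::GetBuiltinTypeError Err = ASTContext::GE_None;
//   unsigned ICEArguments = 0;
//   QualType FnTy = Ctx.GetBuiltinType(BI, Err, &ICEArguments);
//   bool Arg2MustBeICE =
//       Err == ASTContext::GE_None && ((ICEArguments >> 2) & 1);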
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;

  // Non-user-provided functions get emitted as weak definitions with every
  // use, no matter whether they've been explicitly instantiated etc.
  if (!FD->isUserProvided())
    return GVA_DiscardableODR;

  GVALinkage External;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  // C++11 [temp.explicit]p10:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }

  if (!FD->isInlined())
    return External;

  if ((!Context.getLangOpts().CPlusPlus &&
       !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.

    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }

  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted. While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;

  if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
      isa<CXXConstructorDecl>(FD) &&
      cast<CXXConstructorDecl>(FD)->isInheritingConstructor())
    // Our approach to inheriting constructors is fundamentally different from
    // that used by the MS ABI, so keep our inheriting constructor thunks
    // internal rather than trying to pick an unambiguous mangling for them.
    return GVA_Internal;

  return GVA_DiscardableODR;
}
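
// Illustrative outcomes of the rules above for a C++ translation unit (a
// sketch of common cases, not an exhaustive table):
//   static int f() { ... }            -> GVA_Internal (not externally visible)
//   inline int g() { ... }            -> GVA_DiscardableODR
//   template int h<int>();            -> GVA_StrongODR (explicit instantiation
//                                        definition)
//   extern template int h<long>();    -> GVA_AvailableExternally (explicit
//                                        instantiation declaration)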
static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
                                                const Decl *D, GVALinkage L) {
  // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
  //   dllexport/dllimport on inline functions.
  if (D->hasAttr<DLLImportAttr>()) {
    if (L == GVA_DiscardableODR || L == GVA_StrongODR)
      return GVA_AvailableExternally;
  } else if (D->hasAttr<DLLExportAttr>()) {
    if (L == GVA_DiscardableODR)
      return GVA_StrongODR;
  } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
    // Device-side functions with __global__ attribute must always be
    // visible externally so they can be launched from host.
    if (D->hasAttr<CUDAGlobalAttr>() &&
        (L == GVA_DiscardableODR || L == GVA_Internal))
      return GVA_StrongODR;
    // Single source offloading languages like CUDA/HIP need to be able to
    // access static device variables from host code of the same compilation
    // unit. This is done by externalizing the static variable with a shared
    // name between the host and device compilation which is the same for the
    // same compilation unit whereas different among different compilation
    // units.
    if (Context.shouldExternalize(D))
      return GVA_StrongExternal;
  }
  return L;
}
/// Adjust the GVALinkage for a declaration based on what an external AST source
/// knows about whether there can be other definitions of this declaration.
static GVALinkage
adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
                                          GVALinkage L) {
  ExternalASTSource *Source = Ctx.getExternalSource();
  if (!Source)
    return L;

  switch (Source->hasExternalDefinitions(D)) {
  case ExternalASTSource::EK_Never:
    // Other translation units rely on us to provide the definition.
    if (L == GVA_DiscardableODR)
      return GVA_StrongODR;
    break;

  case ExternalASTSource::EK_Always:
    return GVA_AvailableExternally;

  case ExternalASTSource::EK_ReplyHazy:
    break;
  }
  return L;
}

GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
  return adjustGVALinkageForExternalDefinitionKind(*this, FD,
           adjustGVALinkageForAttributes(*this, FD,
             basicGVALinkageForFunction(*this, FD)));
}
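
// The entry point above layers the three steps: compute the basic linkage,
// adjust it for dllimport/dllexport/CUDA attributes, then adjust it for what
// an external AST source (e.g. a module or PCH) promises about other
// definitions. A minimal usage sketch (assumes `Ctx` and `FD` are in scope):
//   GVALinkage L = Ctx.GetGVALinkageForFunction(FD);
//   bool CanBeDeferred = isDiscardableGVALinkage(L);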
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  // As an extension for interactive REPLs, make sure constant variables are
  // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl
  // marking them as internal.
  if (Context.getLangOpts().CPlusPlus &&
      Context.getLangOpts().IncrementalExtensions &&
      VD->getType().isConstQualified() &&
      !VD->getType().isVolatileQualified() && !VD->isInline() &&
      !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate())
    return GVA_DiscardableODR;

  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}

GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
  return adjustGVALinkageForExternalDefinitionKind(*this, VD,
           adjustGVALinkageForAttributes(*this, VD,
             basicGVALinkageForVariable(*this, VD)));
}
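
// Illustrative outcomes of the variable rules above (assumed namespace-scope
// C++17 examples; a sketch rather than a specification):
//   static int a = 0;          -> GVA_Internal
//   int b = 0;                 -> GVA_StrongExternal
//   inline int c = 0;          -> GVA_DiscardableODR
//   template <class T> T v{};  // an implicitly instantiated use of v<int>
//                              -> GVA_DiscardableODR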
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(D))
    return true;
  else if (isa<OMPRequiresDecl>(D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(Linkage);
  }

  const auto *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  // Variables in other module units shouldn't be forced to be emitted.
  if (VD->isInAnotherModuleUnit())
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(*this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
      // We can get a value-dependent initializer during error recovery.
      (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
    for (const auto *BD : DD->bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(BindingVD))
          return true;

  return false;
}
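
// Minimal usage sketch (assumes an ASTContext `Ctx` and a hypothetical
// EmitEagerly callback; not part of this file): walk the translation unit and
// hand CodeGen every declaration that must be emitted even if unreferenced.
//   for (Decl *D : Ctx.getTranslationUnitDecl()->decls())
//     if (Ctx.DeclMustBeEmitted(D))
//       EmitEagerly(D);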
void ASTContext::forEachMultiversionedFunctionVersion(
    const FunctionDecl *FD,
    llvm::function_ref<void(FunctionDecl *)> Pred) const {
  assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
  llvm::SmallDenseSet<const FunctionDecl *, 4> SeenDecls;
  FD = FD->getMostRecentDecl();
  // FIXME: The order of traversal here matters and depends on the order of
  // lookup results, which happens to be (mostly) oldest-to-newest, but we
  // shouldn't rely on that.
  for (auto *CurDecl :
       FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
    FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
    if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
        !SeenDecls.contains(CurFD)) {
      SeenDecls.insert(CurFD);
      Pred(CurFD);
    }
  }
}
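
// Usage sketch (assumes `Ctx` and a multiversioned FunctionDecl `FD`; not
// part of this file): collect every version of the function.
//   llvm::SmallVector<FunctionDecl *, 4> Versions;
//   Ctx.forEachMultiversionedFunctionVersion(
//       FD, [&](FunctionDecl *Version) { Versions.push_back(Version); });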
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod,
                                                    bool IsBuiltin) const {
  // Pass through to the C++ ABI object
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(IsVariadic);

  // Builtins ignore user-specified default calling convention and remain the
  // Target's default calling convention.
  if (!IsBuiltin) {
    switch (LangOpts.getDefaultCallingConv()) {
    case LangOptions::DCC_None:
      break;
    case LangOptions::DCC_CDecl:
      return CC_C;
    case LangOptions::DCC_FastCall:
      if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
        return CC_X86FastCall;
      break;
    case LangOptions::DCC_StdCall:
      if (!IsVariadic)
        return CC_X86StdCall;
      break;
    case LangOptions::DCC_VectorCall:
      // __vectorcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86VectorCall;
      break;
    case LangOptions::DCC_RegCall:
      // __regcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86RegCall;
      break;
    case LangOptions::DCC_RtdCall:
      if (!IsVariadic)
        return CC_M68kRTD;
      break;
    }
  }
  return Target->getDefaultCallingConv();
}
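
// Illustrative behavior of the switch above (a sketch, not exhaustive): with
// a fastcall default calling convention (e.g. -fdefault-calling-conv=fastcall)
// on a 32-bit x86 target that has SSE2, a non-variadic free function gets
// CC_X86FastCall while a variadic one falls through to the target default;
// builtins and C++ methods never consult the user-specified default at all.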
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}

VTableContextBase *ASTContext::getVTableContext() {
  if (!VTContext.get()) {
    auto ABI = Target->getCXXABI();
    if (ABI.isMicrosoft())
      VTContext.reset(new MicrosoftVTableContext(*this));
    else {
      auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
                                 ? ItaniumVTableContext::Relative
                                 : ItaniumVTableContext::Pointer;
      VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
    }
  }
  return VTContext.get();
}

MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}
12025 MangleContext
*ASTContext::createDeviceMangleContext(const TargetInfo
&T
) {
12026 assert(T
.getCXXABI().getKind() != TargetCXXABI::Microsoft
&&
12027 "Device mangle context does not support Microsoft mangling.");
12028 switch (T
.getCXXABI().getKind()) {
12029 case TargetCXXABI::AppleARM64
:
12030 case TargetCXXABI::Fuchsia
:
12031 case TargetCXXABI::GenericAArch64
:
12032 case TargetCXXABI::GenericItanium
:
12033 case TargetCXXABI::GenericARM
:
12034 case TargetCXXABI::GenericMIPS
:
12035 case TargetCXXABI::iOS
:
12036 case TargetCXXABI::WebAssembly
:
12037 case TargetCXXABI::WatchOS
:
12038 case TargetCXXABI::XL
:
12039 return ItaniumMangleContext::create(
12040 *this, getDiagnostics(),
12041 [](ASTContext
&, const NamedDecl
*ND
) -> std::optional
<unsigned> {
12042 if (const auto *RD
= dyn_cast
<CXXRecordDecl
>(ND
))
12043 return RD
->getDeviceLambdaManglingNumber();
12044 return std::nullopt
;
12047 case TargetCXXABI::Microsoft
:
12048 return MicrosoftMangleContext::create(*this, getDiagnostics(),
12051 llvm_unreachable("Unsupported ABI");
12054 CXXABI::~CXXABI() = default;
size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(ObjCLayouts) +
         llvm::capacity_in_bytes(KeyFunctions) +
         llvm::capacity_in_bytes(ObjCImpls) +
         llvm::capacity_in_bytes(BlockVarCopyInits) +
         llvm::capacity_in_bytes(DeclAttrs) +
         llvm::capacity_in_bytes(TemplateOrInstantiation) +
         llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(OverriddenMethods) +
         llvm::capacity_in_bytes(Types) +
         llvm::capacity_in_bytes(VariableArrayTypes);
}
/// getIntTypeForBitwidth -
/// sets integer QualTy according to specified details:
/// bitwidth, signed/unsigned.
/// Returns an empty type if there is no appropriate target type.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
  CanQualType QualTy = getFromTargetType(Ty);
  if (!QualTy && DestWidth == 128)
    return Signed ? Int128Ty : UnsignedInt128Ty;
  return QualTy;
}

/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
/// Returns an empty type if there is no appropriate target type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Half:
    return HalfTy;
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    return QualType();
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}
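
// Usage sketch (assumes `Ctx`; not part of this file): recover a target
// integer type from a bit width, e.g. when lowering mode-like attributes.
//   QualType I32 = Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
//   if (I32.isNull()) {
//     // The target has no matching integer type; only width 128 gets the
//     // Int128Ty/UnsignedInt128Ty fallback above.
//   }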
void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
  if (Number > 1)
    MangleNumbers[ND] = Number;
}

unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
                                       bool ForAuxTarget) const {
  auto I = MangleNumbers.find(ND);
  unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes host and device mangling numbers
  // as lower and upper half of 32 bit integer.
  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
    Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
  } else {
    assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
                            "number for aux target");
  }
  return Res > 1 ? Res : 1;
}
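
// Worked example of the encoding above (a sketch): during CUDA/HIP host
// compilation a stored value of 0x00030002 yields mangling number 2 for the
// host target (low 16 bits) and 3 for the aux/device target (high 16 bits);
// values of 0 or 1 are normalized to the default of 1.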
void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
  if (Number > 1)
    StaticLocalNumbers[VD] = Number;
}

unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto I = StaticLocalNumbers.find(VD);
  return I != StaticLocalNumbers.end() ? I->second : 1;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}
const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()));
}

void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()),
      cast<CXXConstructorDecl>(CD->getFirstDecl()));
}

void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}

TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}

void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}

DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}

void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}

unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}
QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
  return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
                              ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
}

StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        *this, Key, StringLiteralKind::Ordinary,
        /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
        SourceLocation());
  return Result;
}
12235 ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts
) const {
12236 assert(MSGuidTagDecl
&& "building MS GUID without MS extensions?");
12238 llvm::FoldingSetNodeID ID
;
12239 MSGuidDecl::Profile(ID
, Parts
);
12242 if (MSGuidDecl
*Existing
= MSGuidDecls
.FindNodeOrInsertPos(ID
, InsertPos
))
12245 QualType GUIDType
= getMSGuidType().withConst();
12246 MSGuidDecl
*New
= MSGuidDecl::Create(*this, GUIDType
, Parts
);
12247 MSGuidDecls
.InsertNode(New
, InsertPos
);
12251 UnnamedGlobalConstantDecl
*
12252 ASTContext::getUnnamedGlobalConstantDecl(QualType Ty
,
12253 const APValue
&APVal
) const {
12254 llvm::FoldingSetNodeID ID
;
12255 UnnamedGlobalConstantDecl::Profile(ID
, Ty
, APVal
);
12258 if (UnnamedGlobalConstantDecl
*Existing
=
12259 UnnamedGlobalConstantDecls
.FindNodeOrInsertPos(ID
, InsertPos
))
12262 UnnamedGlobalConstantDecl
*New
=
12263 UnnamedGlobalConstantDecl::Create(*this, Ty
, APVal
);
12264 UnnamedGlobalConstantDecls
.InsertNode(New
, InsertPos
);
12268 TemplateParamObjectDecl
*
12269 ASTContext::getTemplateParamObjectDecl(QualType T
, const APValue
&V
) const {
12270 assert(T
->isRecordType() && "template param object of unexpected type");
12272 // C++ [temp.param]p8:
12273 // [...] a static storage duration object of type 'const T' [...]
12276 llvm::FoldingSetNodeID ID
;
12277 TemplateParamObjectDecl::Profile(ID
, T
, V
);
12280 if (TemplateParamObjectDecl
*Existing
=
12281 TemplateParamObjectDecls
.FindNodeOrInsertPos(ID
, InsertPos
))
12284 TemplateParamObjectDecl
*New
= TemplateParamObjectDecl::Create(*this, T
, V
);
12285 TemplateParamObjectDecls
.InsertNode(New
, InsertPos
);
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  if (!(T.isiOS() && T.isOSVersionLT(7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}
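
// Illustrative case for the check above (a sketch): on an affected Darwin
// deployment target (e.g. macOS older than 10.9), an _Atomic of a 3-byte
// struct has size 3 but alignment 1, so Size != Align and the operation would
// need an external atomic library call that those OS versions do not provide.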
12308 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl
*MethodDecl
,
12309 const ObjCMethodDecl
*MethodImpl
) {
12310 // No point trying to match an unavailable/deprecated mothod.
12311 if (MethodDecl
->hasAttr
<UnavailableAttr
>()
12312 || MethodDecl
->hasAttr
<DeprecatedAttr
>())
12314 if (MethodDecl
->getObjCDeclQualifier() !=
12315 MethodImpl
->getObjCDeclQualifier())
12317 if (!hasSameType(MethodDecl
->getReturnType(), MethodImpl
->getReturnType()))
12320 if (MethodDecl
->param_size() != MethodImpl
->param_size())
12323 for (ObjCMethodDecl::param_const_iterator IM
= MethodImpl
->param_begin(),
12324 IF
= MethodDecl
->param_begin(), EM
= MethodImpl
->param_end(),
12325 EF
= MethodDecl
->param_end();
12326 IM
!= EM
&& IF
!= EF
; ++IM
, ++IF
) {
12327 const ParmVarDecl
*DeclVar
= (*IF
);
12328 const ParmVarDecl
*ImplVar
= (*IM
);
12329 if (ImplVar
->getObjCDeclQualifier() != DeclVar
->getObjCDeclQualifier())
12331 if (!hasSameType(DeclVar
->getType(), ImplVar
->getType()))
12335 return (MethodDecl
->isVariadic() == MethodImpl
->isVariadic());
12338 uint64_t ASTContext::getTargetNullPointerValue(QualType QT
) const {
12340 if (QT
->getUnqualifiedDesugaredType()->isNullPtrType())
12341 AS
= LangAS::Default
;
12343 AS
= QT
->getPointeeType().getAddressSpace();
12345 return getTargetInfo().getNullPointerValue(AS
);
12348 unsigned ASTContext::getTargetAddressSpace(LangAS AS
) const {
12349 return getTargetInfo().getTargetAddressSpace(AS
);
12352 bool ASTContext::hasSameExpr(const Expr
*X
, const Expr
*Y
) const {
12357 llvm::FoldingSetNodeID IDX
, IDY
;
12358 X
->Profile(IDX
, *this, /*Canonical=*/true);
12359 Y
->Profile(IDY
, *this, /*Canonical=*/true);
12363 // The getCommon* helpers return, for given 'same' X and Y entities given as
12364 // inputs, another entity which is also the 'same' as the inputs, but which
12365 // is closer to the canonical form of the inputs, each according to a given
12367 // The getCommon*Checked variants are 'null inputs not-allowed' equivalents of
12368 // the regular ones.
12370 static Decl
*getCommonDecl(Decl
*X
, Decl
*Y
) {
12371 if (!declaresSameEntity(X
, Y
))
12373 for (const Decl
*DX
: X
->redecls()) {
12374 // If we reach Y before reaching the first decl, that means X is older.
12377 // If we reach the first decl, then Y is older.
12378 if (DX
->isFirstDecl())
12381 llvm_unreachable("Corrupt redecls chain");
12384 template <class T
, std::enable_if_t
<std::is_base_of_v
<Decl
, T
>, bool> = true>
12385 static T
*getCommonDecl(T
*X
, T
*Y
) {
12386 return cast_or_null
<T
>(
12387 getCommonDecl(const_cast<Decl
*>(cast_or_null
<Decl
>(X
)),
12388 const_cast<Decl
*>(cast_or_null
<Decl
>(Y
))));
12391 template <class T
, std::enable_if_t
<std::is_base_of_v
<Decl
, T
>, bool> = true>
12392 static T
*getCommonDeclChecked(T
*X
, T
*Y
) {
12393 return cast
<T
>(getCommonDecl(const_cast<Decl
*>(cast
<Decl
>(X
)),
12394 const_cast<Decl
*>(cast
<Decl
>(Y
))));
12397 static TemplateName
getCommonTemplateName(ASTContext
&Ctx
, TemplateName X
,
12399 if (X
.getAsVoidPointer() == Y
.getAsVoidPointer())
12401 // FIXME: There are cases here where we could find a common template name
12402 // with more sugar. For example one could be a SubstTemplateTemplate*
12403 // replacing the other.
12404 TemplateName CX
= Ctx
.getCanonicalTemplateName(X
);
12405 if (CX
.getAsVoidPointer() !=
12406 Ctx
.getCanonicalTemplateName(Y
).getAsVoidPointer())
12407 return TemplateName();
12411 static TemplateName
12412 getCommonTemplateNameChecked(ASTContext
&Ctx
, TemplateName X
, TemplateName Y
) {
12413 TemplateName R
= getCommonTemplateName(Ctx
, X
, Y
);
12414 assert(R
.getAsVoidPointer() != nullptr);
12418 static auto getCommonTypes(ASTContext
&Ctx
, ArrayRef
<QualType
> Xs
,
12419 ArrayRef
<QualType
> Ys
, bool Unqualified
= false) {
12420 assert(Xs
.size() == Ys
.size());
12421 SmallVector
<QualType
, 8> Rs(Xs
.size());
12422 for (size_t I
= 0; I
< Rs
.size(); ++I
)
12423 Rs
[I
] = Ctx
.getCommonSugaredType(Xs
[I
], Ys
[I
], Unqualified
);
12428 static SourceLocation
getCommonAttrLoc(const T
*X
, const T
*Y
) {
12429 return X
->getAttributeLoc() == Y
->getAttributeLoc() ? X
->getAttributeLoc()
12430 : SourceLocation();
12433 static TemplateArgument
getCommonTemplateArgument(ASTContext
&Ctx
,
12434 const TemplateArgument
&X
,
12435 const TemplateArgument
&Y
) {
12436 if (X
.getKind() != Y
.getKind())
12437 return TemplateArgument();
12439 switch (X
.getKind()) {
12440 case TemplateArgument::ArgKind::Type
:
12441 if (!Ctx
.hasSameType(X
.getAsType(), Y
.getAsType()))
12442 return TemplateArgument();
12443 return TemplateArgument(
12444 Ctx
.getCommonSugaredType(X
.getAsType(), Y
.getAsType()));
12445 case TemplateArgument::ArgKind::NullPtr
:
12446 if (!Ctx
.hasSameType(X
.getNullPtrType(), Y
.getNullPtrType()))
12447 return TemplateArgument();
12448 return TemplateArgument(
12449 Ctx
.getCommonSugaredType(X
.getNullPtrType(), Y
.getNullPtrType()),
12450 /*Unqualified=*/true);
12451 case TemplateArgument::ArgKind::Expression
:
12452 if (!Ctx
.hasSameType(X
.getAsExpr()->getType(), Y
.getAsExpr()->getType()))
12453 return TemplateArgument();
12454 // FIXME: Try to keep the common sugar.
12456 case TemplateArgument::ArgKind::Template
: {
12457 TemplateName TX
= X
.getAsTemplate(), TY
= Y
.getAsTemplate();
12458 TemplateName CTN
= ::getCommonTemplateName(Ctx
, TX
, TY
);
12459 if (!CTN
.getAsVoidPointer())
12460 return TemplateArgument();
12461 return TemplateArgument(CTN
);
12463 case TemplateArgument::ArgKind::TemplateExpansion
: {
12464 TemplateName TX
= X
.getAsTemplateOrTemplatePattern(),
12465 TY
= Y
.getAsTemplateOrTemplatePattern();
12466 TemplateName CTN
= ::getCommonTemplateName(Ctx
, TX
, TY
);
12467 if (!CTN
.getAsVoidPointer())
12468 return TemplateName();
12469 auto NExpX
= X
.getNumTemplateExpansions();
12470 assert(NExpX
== Y
.getNumTemplateExpansions());
12471 return TemplateArgument(CTN
, NExpX
);
12474 // FIXME: Handle the other argument kinds.
12479 static bool getCommonTemplateArguments(ASTContext
&Ctx
,
12480 SmallVectorImpl
<TemplateArgument
> &R
,
12481 ArrayRef
<TemplateArgument
> Xs
,
12482 ArrayRef
<TemplateArgument
> Ys
) {
12483 if (Xs
.size() != Ys
.size())
12485 R
.resize(Xs
.size());
12486 for (size_t I
= 0; I
< R
.size(); ++I
) {
12487 R
[I
] = getCommonTemplateArgument(Ctx
, Xs
[I
], Ys
[I
]);
12494 static auto getCommonTemplateArguments(ASTContext
&Ctx
,
12495 ArrayRef
<TemplateArgument
> Xs
,
12496 ArrayRef
<TemplateArgument
> Ys
) {
12497 SmallVector
<TemplateArgument
, 8> R
;
12498 bool Different
= getCommonTemplateArguments(Ctx
, R
, Xs
, Ys
);
12499 assert(!Different
);
12505 static ElaboratedTypeKeyword
getCommonTypeKeyword(const T
*X
, const T
*Y
) {
12506 return X
->getKeyword() == Y
->getKeyword() ? X
->getKeyword()
12507 : ElaboratedTypeKeyword::None
;
12511 static NestedNameSpecifier
*getCommonNNS(ASTContext
&Ctx
, const T
*X
,
12513 // FIXME: Try to keep the common NNS sugar.
12514 return X
->getQualifier() == Y
->getQualifier()
12515 ? X
->getQualifier()
12516 : Ctx
.getCanonicalNestedNameSpecifier(X
->getQualifier());
12520 static QualType
getCommonElementType(ASTContext
&Ctx
, const T
*X
, const T
*Y
) {
12521 return Ctx
.getCommonSugaredType(X
->getElementType(), Y
->getElementType());
12525 static QualType
getCommonArrayElementType(ASTContext
&Ctx
, const T
*X
,
12526 Qualifiers
&QX
, const T
*Y
,
12528 QualType EX
= X
->getElementType(), EY
= Y
->getElementType();
12529 QualType R
= Ctx
.getCommonSugaredType(EX
, EY
,
12530 /*Unqualified=*/true);
12531 Qualifiers RQ
= R
.getQualifiers();
12532 QX
+= EX
.getQualifiers() - RQ
;
12533 QY
+= EY
.getQualifiers() - RQ
;
12538 static QualType
getCommonPointeeType(ASTContext
&Ctx
, const T
*X
, const T
*Y
) {
12539 return Ctx
.getCommonSugaredType(X
->getPointeeType(), Y
->getPointeeType());
12542 template <class T
> static auto *getCommonSizeExpr(ASTContext
&Ctx
, T
*X
, T
*Y
) {
12543 assert(Ctx
.hasSameExpr(X
->getSizeExpr(), Y
->getSizeExpr()));
12544 return X
->getSizeExpr();
12547 static auto getCommonSizeModifier(const ArrayType
*X
, const ArrayType
*Y
) {
12548 assert(X
->getSizeModifier() == Y
->getSizeModifier());
12549 return X
->getSizeModifier();
12552 static auto getCommonIndexTypeCVRQualifiers(const ArrayType
*X
,
12553 const ArrayType
*Y
) {
12554 assert(X
->getIndexTypeCVRQualifiers() == Y
->getIndexTypeCVRQualifiers());
12555 return X
->getIndexTypeCVRQualifiers();
12558 // Merges two type lists such that the resulting vector will contain
12559 // each type (in a canonical sense) only once, in the order they appear
12560 // from X to Y. If they occur in both X and Y, the result will contain
12561 // the common sugared type between them.
12562 static void mergeTypeLists(ASTContext
&Ctx
, SmallVectorImpl
<QualType
> &Out
,
12563 ArrayRef
<QualType
> X
, ArrayRef
<QualType
> Y
) {
12564 llvm::DenseMap
<QualType
, unsigned> Found
;
12565 for (auto Ts
: {X
, Y
}) {
12566 for (QualType T
: Ts
) {
12567 auto Res
= Found
.try_emplace(Ctx
.getCanonicalType(T
), Out
.size());
12569 QualType
&U
= Out
[Res
.first
->second
];
12570 U
= Ctx
.getCommonSugaredType(U
, T
);
12572 Out
.emplace_back(T
);
12578 FunctionProtoType::ExceptionSpecInfo
12579 ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1
,
12580 FunctionProtoType::ExceptionSpecInfo ESI2
,
12581 SmallVectorImpl
<QualType
> &ExceptionTypeStorage
,
12582 bool AcceptDependent
) {
12583 ExceptionSpecificationType EST1
= ESI1
.Type
, EST2
= ESI2
.Type
;
12585 // If either of them can throw anything, that is the result.
12586 for (auto I
: {EST_None
, EST_MSAny
, EST_NoexceptFalse
}) {
12593 // If either of them is non-throwing, the result is the other.
12595 {EST_NoThrow
, EST_DynamicNone
, EST_BasicNoexcept
, EST_NoexceptTrue
}) {
12602 // If we're left with value-dependent computed noexcept expressions, we're
12603 // stuck. Before C++17, we can just drop the exception specification entirely,
12604 // since it's not actually part of the canonical type. And this should never
12605 // happen in C++17, because it would mean we were computing the composite
12606 // pointer type of dependent types, which should never happen.
12607 if (EST1
== EST_DependentNoexcept
|| EST2
== EST_DependentNoexcept
) {
12608 assert(AcceptDependent
&&
12609 "computing composite pointer type of dependent types");
12610 return FunctionProtoType::ExceptionSpecInfo();
12613 // Switch over the possibilities so that people adding new values know to
12614 // update this function.
12617 case EST_DynamicNone
:
12619 case EST_BasicNoexcept
:
12620 case EST_DependentNoexcept
:
12621 case EST_NoexceptFalse
:
12622 case EST_NoexceptTrue
:
12624 llvm_unreachable("These ESTs should be handled above");
12626 case EST_Dynamic
: {
12627 // This is the fun case: both exception specifications are dynamic. Form
12628 // the union of the two lists.
12629 assert(EST2
== EST_Dynamic
&& "other cases should already be handled");
12630 mergeTypeLists(*this, ExceptionTypeStorage
, ESI1
.Exceptions
,
12632 FunctionProtoType::ExceptionSpecInfo
Result(EST_Dynamic
);
12633 Result
.Exceptions
= ExceptionTypeStorage
;
12637 case EST_Unevaluated
:
12638 case EST_Uninstantiated
:
12640 llvm_unreachable("shouldn't see unresolved exception specifications here");
12643 llvm_unreachable("invalid ExceptionSpecificationType");
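
// Worked examples of the merge above (sketches): noexcept(true) merged with
// noexcept(false) yields noexcept(false), since anything that can throw wins;
// throw(A) merged with throw(B) yields a dynamic specification listing the
// union {A, B}; and merging a non-throwing specification with anything else
// generally returns the other operand unchanged.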
12646 static QualType
getCommonNonSugarTypeNode(ASTContext
&Ctx
, const Type
*X
,
12647 Qualifiers
&QX
, const Type
*Y
,
12649 Type::TypeClass TC
= X
->getTypeClass();
12650 assert(TC
== Y
->getTypeClass());
12652 #define UNEXPECTED_TYPE(Class, Kind) \
12653 case Type::Class: \
12654 llvm_unreachable("Unexpected " Kind ": " #Class);
12656 #define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
12657 #define TYPE(Class, Base)
12658 #include "clang/AST/TypeNodes.inc"
12660 #define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
12661 SUGAR_FREE_TYPE(Builtin
)
12662 SUGAR_FREE_TYPE(DeducedTemplateSpecialization
)
12663 SUGAR_FREE_TYPE(DependentBitInt
)
12664 SUGAR_FREE_TYPE(Enum
)
12665 SUGAR_FREE_TYPE(BitInt
)
12666 SUGAR_FREE_TYPE(ObjCInterface
)
12667 SUGAR_FREE_TYPE(Record
)
12668 SUGAR_FREE_TYPE(SubstTemplateTypeParmPack
)
12669 SUGAR_FREE_TYPE(UnresolvedUsing
)
12670 #undef SUGAR_FREE_TYPE
12671 #define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
12672 NON_UNIQUE_TYPE(TypeOfExpr
)
12673 NON_UNIQUE_TYPE(VariableArray
)
12674 #undef NON_UNIQUE_TYPE
12676 UNEXPECTED_TYPE(TypeOf
, "sugar")
12678 #undef UNEXPECTED_TYPE
12681 const auto *AX
= cast
<AutoType
>(X
), *AY
= cast
<AutoType
>(Y
);
12682 assert(AX
->getDeducedType().isNull());
12683 assert(AY
->getDeducedType().isNull());
12684 assert(AX
->getKeyword() == AY
->getKeyword());
12685 assert(AX
->isInstantiationDependentType() ==
12686 AY
->isInstantiationDependentType());
12687 auto As
= getCommonTemplateArguments(Ctx
, AX
->getTypeConstraintArguments(),
12688 AY
->getTypeConstraintArguments());
12689 return Ctx
.getAutoType(QualType(), AX
->getKeyword(),
12690 AX
->isInstantiationDependentType(),
12691 AX
->containsUnexpandedParameterPack(),
12692 getCommonDeclChecked(AX
->getTypeConstraintConcept(),
12693 AY
->getTypeConstraintConcept()),
12696 case Type::IncompleteArray
: {
12697 const auto *AX
= cast
<IncompleteArrayType
>(X
),
12698 *AY
= cast
<IncompleteArrayType
>(Y
);
12699 return Ctx
.getIncompleteArrayType(
12700 getCommonArrayElementType(Ctx
, AX
, QX
, AY
, QY
),
12701 getCommonSizeModifier(AX
, AY
), getCommonIndexTypeCVRQualifiers(AX
, AY
));
12703 case Type::DependentSizedArray
: {
12704 const auto *AX
= cast
<DependentSizedArrayType
>(X
),
12705 *AY
= cast
<DependentSizedArrayType
>(Y
);
12706 return Ctx
.getDependentSizedArrayType(
12707 getCommonArrayElementType(Ctx
, AX
, QX
, AY
, QY
),
12708 getCommonSizeExpr(Ctx
, AX
, AY
), getCommonSizeModifier(AX
, AY
),
12709 getCommonIndexTypeCVRQualifiers(AX
, AY
),
12710 AX
->getBracketsRange() == AY
->getBracketsRange()
12711 ? AX
->getBracketsRange()
12714 case Type::ConstantArray
: {
12715 const auto *AX
= cast
<ConstantArrayType
>(X
),
12716 *AY
= cast
<ConstantArrayType
>(Y
);
12717 assert(AX
->getSize() == AY
->getSize());
12718 const Expr
*SizeExpr
= Ctx
.hasSameExpr(AX
->getSizeExpr(), AY
->getSizeExpr())
12719 ? AX
->getSizeExpr()
12721 return Ctx
.getConstantArrayType(
12722 getCommonArrayElementType(Ctx
, AX
, QX
, AY
, QY
), AX
->getSize(), SizeExpr
,
12723 getCommonSizeModifier(AX
, AY
), getCommonIndexTypeCVRQualifiers(AX
, AY
));
12725 case Type::Atomic
: {
12726 const auto *AX
= cast
<AtomicType
>(X
), *AY
= cast
<AtomicType
>(Y
);
12727 return Ctx
.getAtomicType(
12728 Ctx
.getCommonSugaredType(AX
->getValueType(), AY
->getValueType()));
12730 case Type::Complex
: {
12731 const auto *CX
= cast
<ComplexType
>(X
), *CY
= cast
<ComplexType
>(Y
);
12732 return Ctx
.getComplexType(getCommonArrayElementType(Ctx
, CX
, QX
, CY
, QY
));
12734 case Type::Pointer
: {
12735 const auto *PX
= cast
<PointerType
>(X
), *PY
= cast
<PointerType
>(Y
);
12736 return Ctx
.getPointerType(getCommonPointeeType(Ctx
, PX
, PY
));
12738 case Type::BlockPointer
: {
12739 const auto *PX
= cast
<BlockPointerType
>(X
), *PY
= cast
<BlockPointerType
>(Y
);
12740 return Ctx
.getBlockPointerType(getCommonPointeeType(Ctx
, PX
, PY
));
12742 case Type::ObjCObjectPointer
: {
12743 const auto *PX
= cast
<ObjCObjectPointerType
>(X
),
12744 *PY
= cast
<ObjCObjectPointerType
>(Y
);
12745 return Ctx
.getObjCObjectPointerType(getCommonPointeeType(Ctx
, PX
, PY
));
12747 case Type::MemberPointer
: {
12748 const auto *PX
= cast
<MemberPointerType
>(X
),
12749 *PY
= cast
<MemberPointerType
>(Y
);
12750 return Ctx
.getMemberPointerType(
12751 getCommonPointeeType(Ctx
, PX
, PY
),
12752 Ctx
.getCommonSugaredType(QualType(PX
->getClass(), 0),
12753 QualType(PY
->getClass(), 0))
12756 case Type::LValueReference
: {
12757 const auto *PX
= cast
<LValueReferenceType
>(X
),
12758 *PY
= cast
<LValueReferenceType
>(Y
);
12759 // FIXME: Preserve PointeeTypeAsWritten.
12760 return Ctx
.getLValueReferenceType(getCommonPointeeType(Ctx
, PX
, PY
),
12761 PX
->isSpelledAsLValue() ||
12762 PY
->isSpelledAsLValue());
12764 case Type::RValueReference
: {
12765 const auto *PX
= cast
<RValueReferenceType
>(X
),
12766 *PY
= cast
<RValueReferenceType
>(Y
);
12767 // FIXME: Preserve PointeeTypeAsWritten.
12768 return Ctx
.getRValueReferenceType(getCommonPointeeType(Ctx
, PX
, PY
));
12770 case Type::DependentAddressSpace
: {
12771 const auto *PX
= cast
<DependentAddressSpaceType
>(X
),
12772 *PY
= cast
<DependentAddressSpaceType
>(Y
);
12773 assert(Ctx
.hasSameExpr(PX
->getAddrSpaceExpr(), PY
->getAddrSpaceExpr()));
12774 return Ctx
.getDependentAddressSpaceType(getCommonPointeeType(Ctx
, PX
, PY
),
12775 PX
->getAddrSpaceExpr(),
12776 getCommonAttrLoc(PX
, PY
));
12778 case Type::FunctionNoProto
: {
12779 const auto *FX
= cast
<FunctionNoProtoType
>(X
),
12780 *FY
= cast
<FunctionNoProtoType
>(Y
);
12781 assert(FX
->getExtInfo() == FY
->getExtInfo());
12782 return Ctx
.getFunctionNoProtoType(
12783 Ctx
.getCommonSugaredType(FX
->getReturnType(), FY
->getReturnType()),
12786 case Type::FunctionProto
: {
12787 const auto *FX
= cast
<FunctionProtoType
>(X
),
12788 *FY
= cast
<FunctionProtoType
>(Y
);
12789 FunctionProtoType::ExtProtoInfo EPIX
= FX
->getExtProtoInfo(),
12790 EPIY
= FY
->getExtProtoInfo();
12791 assert(EPIX
.ExtInfo
== EPIY
.ExtInfo
);
12792 assert(EPIX
.ExtParameterInfos
== EPIY
.ExtParameterInfos
);
12793 assert(EPIX
.RefQualifier
== EPIY
.RefQualifier
);
12794 assert(EPIX
.TypeQuals
== EPIY
.TypeQuals
);
12795 assert(EPIX
.Variadic
== EPIY
.Variadic
);
12797 // FIXME: Can we handle an empty EllipsisLoc?
12798 // Use emtpy EllipsisLoc if X and Y differ.
12800 EPIX
.HasTrailingReturn
= EPIX
.HasTrailingReturn
&& EPIY
.HasTrailingReturn
;
12803 Ctx
.getCommonSugaredType(FX
->getReturnType(), FY
->getReturnType());
12804 auto P
= getCommonTypes(Ctx
, FX
->param_types(), FY
->param_types(),
12805 /*Unqualified=*/true);
12807 SmallVector
<QualType
, 8> Exceptions
;
12808 EPIX
.ExceptionSpec
= Ctx
.mergeExceptionSpecs(
12809 EPIX
.ExceptionSpec
, EPIY
.ExceptionSpec
, Exceptions
, true);
12810 return Ctx
.getFunctionType(R
, P
, EPIX
);
12812 case Type::ObjCObject
: {
12813 const auto *OX
= cast
<ObjCObjectType
>(X
), *OY
= cast
<ObjCObjectType
>(Y
);
12815 std::equal(OX
->getProtocols().begin(), OX
->getProtocols().end(),
12816 OY
->getProtocols().begin(), OY
->getProtocols().end(),
12817 [](const ObjCProtocolDecl
*P0
, const ObjCProtocolDecl
*P1
) {
12818 return P0
->getCanonicalDecl() == P1
->getCanonicalDecl();
12820 "protocol lists must be the same");
12821 auto TAs
= getCommonTypes(Ctx
, OX
->getTypeArgsAsWritten(),
12822 OY
->getTypeArgsAsWritten());
12823 return Ctx
.getObjCObjectType(
12824 Ctx
.getCommonSugaredType(OX
->getBaseType(), OY
->getBaseType()), TAs
,
12825 OX
->getProtocols(),
12826 OX
->isKindOfTypeAsWritten() && OY
->isKindOfTypeAsWritten());
12828 case Type::ConstantMatrix
: {
12829 const auto *MX
= cast
<ConstantMatrixType
>(X
),
12830 *MY
= cast
<ConstantMatrixType
>(Y
);
12831 assert(MX
->getNumRows() == MY
->getNumRows());
12832 assert(MX
->getNumColumns() == MY
->getNumColumns());
12833 return Ctx
.getConstantMatrixType(getCommonElementType(Ctx
, MX
, MY
),
12834 MX
->getNumRows(), MX
->getNumColumns());
12836 case Type::DependentSizedMatrix
: {
12837 const auto *MX
= cast
<DependentSizedMatrixType
>(X
),
12838 *MY
= cast
<DependentSizedMatrixType
>(Y
);
12839 assert(Ctx
.hasSameExpr(MX
->getRowExpr(), MY
->getRowExpr()));
12840 assert(Ctx
.hasSameExpr(MX
->getColumnExpr(), MY
->getColumnExpr()));
12841 return Ctx
.getDependentSizedMatrixType(
12842 getCommonElementType(Ctx
, MX
, MY
), MX
->getRowExpr(),
12843 MX
->getColumnExpr(), getCommonAttrLoc(MX
, MY
));
12845 case Type::Vector
: {
12846 const auto *VX
= cast
<VectorType
>(X
), *VY
= cast
<VectorType
>(Y
);
12847 assert(VX
->getNumElements() == VY
->getNumElements());
12848 assert(VX
->getVectorKind() == VY
->getVectorKind());
12849 return Ctx
.getVectorType(getCommonElementType(Ctx
, VX
, VY
),
12850 VX
->getNumElements(), VX
->getVectorKind());
12852 case Type::ExtVector
: {
12853 const auto *VX
= cast
<ExtVectorType
>(X
), *VY
= cast
<ExtVectorType
>(Y
);
12854 assert(VX
->getNumElements() == VY
->getNumElements());
12855 return Ctx
.getExtVectorType(getCommonElementType(Ctx
, VX
, VY
),
12856 VX
->getNumElements());
12858 case Type::DependentSizedExtVector
: {
12859 const auto *VX
= cast
<DependentSizedExtVectorType
>(X
),
12860 *VY
= cast
<DependentSizedExtVectorType
>(Y
);
12861 return Ctx
.getDependentSizedExtVectorType(getCommonElementType(Ctx
, VX
, VY
),
12862 getCommonSizeExpr(Ctx
, VX
, VY
),
12863 getCommonAttrLoc(VX
, VY
));
12865 case Type::DependentVector
: {
12866 const auto *VX
= cast
<DependentVectorType
>(X
),
12867 *VY
= cast
<DependentVectorType
>(Y
);
12868 assert(VX
->getVectorKind() == VY
->getVectorKind());
12869 return Ctx
.getDependentVectorType(
12870 getCommonElementType(Ctx
, VX
, VY
), getCommonSizeExpr(Ctx
, VX
, VY
),
12871 getCommonAttrLoc(VX
, VY
), VX
->getVectorKind());
12873 case Type::InjectedClassName
: {
12874 const auto *IX
= cast
<InjectedClassNameType
>(X
),
12875 *IY
= cast
<InjectedClassNameType
>(Y
);
12876 return Ctx
.getInjectedClassNameType(
12877 getCommonDeclChecked(IX
->getDecl(), IY
->getDecl()),
12878 Ctx
.getCommonSugaredType(IX
->getInjectedSpecializationType(),
12879 IY
->getInjectedSpecializationType()));
12881 case Type::TemplateSpecialization
: {
12882 const auto *TX
= cast
<TemplateSpecializationType
>(X
),
12883 *TY
= cast
<TemplateSpecializationType
>(Y
);
12884 auto As
= getCommonTemplateArguments(Ctx
, TX
->template_arguments(),
12885 TY
->template_arguments());
12886 return Ctx
.getTemplateSpecializationType(
12887 ::getCommonTemplateNameChecked(Ctx
, TX
->getTemplateName(),
12888 TY
->getTemplateName()),
12889 As
, X
->getCanonicalTypeInternal());
12891 case Type::Decltype
: {
12892 const auto *DX
= cast
<DecltypeType
>(X
);
12893 [[maybe_unused
]] const auto *DY
= cast
<DecltypeType
>(Y
);
12894 assert(DX
->isDependentType());
12895 assert(DY
->isDependentType());
12896 assert(Ctx
.hasSameExpr(DX
->getUnderlyingExpr(), DY
->getUnderlyingExpr()));
12897 // As Decltype is not uniqued, building a common type would be wasteful.
12898 return QualType(DX
, 0);
12900 case Type::DependentName
: {
12901 const auto *NX
= cast
<DependentNameType
>(X
),
12902 *NY
= cast
<DependentNameType
>(Y
);
12903 assert(NX
->getIdentifier() == NY
->getIdentifier());
12904 return Ctx
.getDependentNameType(
12905 getCommonTypeKeyword(NX
, NY
), getCommonNNS(Ctx
, NX
, NY
),
12906 NX
->getIdentifier(), NX
->getCanonicalTypeInternal());
12908 case Type::DependentTemplateSpecialization
: {
12909 const auto *TX
= cast
<DependentTemplateSpecializationType
>(X
),
12910 *TY
= cast
<DependentTemplateSpecializationType
>(Y
);
12911 assert(TX
->getIdentifier() == TY
->getIdentifier());
12912 auto As
= getCommonTemplateArguments(Ctx
, TX
->template_arguments(),
12913 TY
->template_arguments());
12914 return Ctx
.getDependentTemplateSpecializationType(
12915 getCommonTypeKeyword(TX
, TY
), getCommonNNS(Ctx
, TX
, TY
),
12916 TX
->getIdentifier(), As
);
12918 case Type::UnaryTransform
: {
12919 const auto *TX
= cast
<UnaryTransformType
>(X
),
12920 *TY
= cast
<UnaryTransformType
>(Y
);
12921 assert(TX
->getUTTKind() == TY
->getUTTKind());
12922 return Ctx
.getUnaryTransformType(
12923 Ctx
.getCommonSugaredType(TX
->getBaseType(), TY
->getBaseType()),
12924 Ctx
.getCommonSugaredType(TX
->getUnderlyingType(),
12925 TY
->getUnderlyingType()),
12928 case Type::PackExpansion
: {
12929 const auto *PX
= cast
<PackExpansionType
>(X
),
12930 *PY
= cast
<PackExpansionType
>(Y
);
12931 assert(PX
->getNumExpansions() == PY
->getNumExpansions());
12932 return Ctx
.getPackExpansionType(
12933 Ctx
.getCommonSugaredType(PX
->getPattern(), PY
->getPattern()),
12934 PX
->getNumExpansions(), false);
12937 const auto *PX
= cast
<PipeType
>(X
), *PY
= cast
<PipeType
>(Y
);
12938 assert(PX
->isReadOnly() == PY
->isReadOnly());
12939 auto MP
= PX
->isReadOnly() ? &ASTContext::getReadPipeType
12940 : &ASTContext::getWritePipeType
;
12941 return (Ctx
.*MP
)(getCommonElementType(Ctx
, PX
, PY
));
12943 case Type::TemplateTypeParm
: {
12944 const auto *TX
= cast
<TemplateTypeParmType
>(X
),
12945 *TY
= cast
<TemplateTypeParmType
>(Y
);
12946 assert(TX
->getDepth() == TY
->getDepth());
12947 assert(TX
->getIndex() == TY
->getIndex());
12948 assert(TX
->isParameterPack() == TY
->isParameterPack());
12949 return Ctx
.getTemplateTypeParmType(
12950 TX
->getDepth(), TX
->getIndex(), TX
->isParameterPack(),
12951 getCommonDecl(TX
->getDecl(), TY
->getDecl()));
12954 llvm_unreachable("Unknown Type Class");
12957 static QualType
getCommonSugarTypeNode(ASTContext
&Ctx
, const Type
*X
,
12959 SplitQualType Underlying
) {
12960 Type::TypeClass TC
= X
->getTypeClass();
12961 if (TC
!= Y
->getTypeClass())
12964 #define UNEXPECTED_TYPE(Class, Kind) \
12965 case Type::Class: \
12966 llvm_unreachable("Unexpected " Kind ": " #Class);
12967 #define TYPE(Class, Base)
12968 #define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
12969 #include "clang/AST/TypeNodes.inc"
12971 #define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
12972 CANONICAL_TYPE(Atomic
)
12973 CANONICAL_TYPE(BitInt
)
12974 CANONICAL_TYPE(BlockPointer
)
12975 CANONICAL_TYPE(Builtin
)
12976 CANONICAL_TYPE(Complex
)
12977 CANONICAL_TYPE(ConstantArray
)
12978 CANONICAL_TYPE(ConstantMatrix
)
12979 CANONICAL_TYPE(Enum
)
12980 CANONICAL_TYPE(ExtVector
)
12981 CANONICAL_TYPE(FunctionNoProto
)
12982 CANONICAL_TYPE(FunctionProto
)
12983 CANONICAL_TYPE(IncompleteArray
)
12984 CANONICAL_TYPE(LValueReference
)
12985 CANONICAL_TYPE(MemberPointer
)
12986 CANONICAL_TYPE(ObjCInterface
)
12987 CANONICAL_TYPE(ObjCObject
)
12988 CANONICAL_TYPE(ObjCObjectPointer
)
12989 CANONICAL_TYPE(Pipe
)
12990 CANONICAL_TYPE(Pointer
)
12991 CANONICAL_TYPE(Record
)
12992 CANONICAL_TYPE(RValueReference
)
12993 CANONICAL_TYPE(VariableArray
)
12994 CANONICAL_TYPE(Vector
)
12995 #undef CANONICAL_TYPE
12997 #undef UNEXPECTED_TYPE
12999 case Type::Adjusted
: {
13000 const auto *AX
= cast
<AdjustedType
>(X
), *AY
= cast
<AdjustedType
>(Y
);
13001 QualType OX
= AX
->getOriginalType(), OY
= AY
->getOriginalType();
13002 if (!Ctx
.hasSameType(OX
, OY
))
13004 // FIXME: It's inefficient to have to unify the original types.
13005 return Ctx
.getAdjustedType(Ctx
.getCommonSugaredType(OX
, OY
),
13006 Ctx
.getQualifiedType(Underlying
));
13008 case Type::Decayed
: {
13009 const auto *DX
= cast
<DecayedType
>(X
), *DY
= cast
<DecayedType
>(Y
);
13010 QualType OX
= DX
->getOriginalType(), OY
= DY
->getOriginalType();
13011 if (!Ctx
.hasSameType(OX
, OY
))
13013 // FIXME: It's inefficient to have to unify the original types.
13014 return Ctx
.getDecayedType(Ctx
.getCommonSugaredType(OX
, OY
),
13015 Ctx
.getQualifiedType(Underlying
));
13017 case Type::Attributed
: {
13018 const auto *AX
= cast
<AttributedType
>(X
), *AY
= cast
<AttributedType
>(Y
);
13019 AttributedType::Kind Kind
= AX
->getAttrKind();
13020 if (Kind
!= AY
->getAttrKind())
13022 QualType MX
= AX
->getModifiedType(), MY
= AY
->getModifiedType();
13023 if (!Ctx
.hasSameType(MX
, MY
))
13025 // FIXME: It's inefficient to have to unify the modified types.
13026 return Ctx
.getAttributedType(Kind
, Ctx
.getCommonSugaredType(MX
, MY
),
13027 Ctx
.getQualifiedType(Underlying
));
13029 case Type::BTFTagAttributed
: {
13030 const auto *BX
= cast
<BTFTagAttributedType
>(X
);
13031 const BTFTypeTagAttr
*AX
= BX
->getAttr();
13032 // The attribute is not uniqued, so just compare the tag.
13033 if (AX
->getBTFTypeTag() !=
13034 cast
<BTFTagAttributedType
>(Y
)->getAttr()->getBTFTypeTag())
13036 return Ctx
.getBTFTagAttributedType(AX
, Ctx
.getQualifiedType(Underlying
));
13039 const auto *AX
= cast
<AutoType
>(X
), *AY
= cast
<AutoType
>(Y
);
13041 AutoTypeKeyword KW
= AX
->getKeyword();
13042 if (KW
!= AY
->getKeyword())
13045 ConceptDecl
*CD
= ::getCommonDecl(AX
->getTypeConstraintConcept(),
13046 AY
->getTypeConstraintConcept());
13047 SmallVector
<TemplateArgument
, 8> As
;
13049 getCommonTemplateArguments(Ctx
, As
, AX
->getTypeConstraintArguments(),
13050 AY
->getTypeConstraintArguments())) {
13051 CD
= nullptr; // The arguments differ, so make it unconstrained.
13055 // Both auto types can't be dependent, otherwise they wouldn't have been
13056 // sugar. This implies they can't contain unexpanded packs either.
13057 return Ctx
.getAutoType(Ctx
.getQualifiedType(Underlying
), AX
->getKeyword(),
13058 /*IsDependent=*/false, /*IsPack=*/false, CD
, As
);
13060 case Type::Decltype
:
13062 case Type::DeducedTemplateSpecialization
:
13063 // FIXME: Try to merge these.
13066 case Type::Elaborated
: {
13067 const auto *EX
= cast
<ElaboratedType
>(X
), *EY
= cast
<ElaboratedType
>(Y
);
13068 return Ctx
.getElaboratedType(
13069 ::getCommonTypeKeyword(EX
, EY
), ::getCommonNNS(Ctx
, EX
, EY
),
13070 Ctx
.getQualifiedType(Underlying
),
13071 ::getCommonDecl(EX
->getOwnedTagDecl(), EY
->getOwnedTagDecl()));
13073 case Type::MacroQualified
: {
13074 const auto *MX
= cast
<MacroQualifiedType
>(X
),
13075 *MY
= cast
<MacroQualifiedType
>(Y
);
13076 const IdentifierInfo
*IX
= MX
->getMacroIdentifier();
13077 if (IX
!= MY
->getMacroIdentifier())
13079 return Ctx
.getMacroQualifiedType(Ctx
.getQualifiedType(Underlying
), IX
);
13081 case Type::SubstTemplateTypeParm
: {
13082 const auto *SX
= cast
<SubstTemplateTypeParmType
>(X
),
13083 *SY
= cast
<SubstTemplateTypeParmType
>(Y
);
13085 ::getCommonDecl(SX
->getAssociatedDecl(), SY
->getAssociatedDecl());
13088 unsigned Index
= SX
->getIndex();
13089 if (Index
!= SY
->getIndex())
13091 auto PackIndex
= SX
->getPackIndex();
13092 if (PackIndex
!= SY
->getPackIndex())
13094 return Ctx
.getSubstTemplateTypeParmType(Ctx
.getQualifiedType(Underlying
),
13095 CD
, Index
, PackIndex
);
13097 case Type::ObjCTypeParam
:
13098 // FIXME: Try to merge these.
13101 return Ctx
.getParenType(Ctx
.getQualifiedType(Underlying
));
13103 case Type::TemplateSpecialization
: {
13104 const auto *TX
= cast
<TemplateSpecializationType
>(X
),
13105 *TY
= cast
<TemplateSpecializationType
>(Y
);
13106 TemplateName CTN
= ::getCommonTemplateName(Ctx
, TX
->getTemplateName(),
13107 TY
->getTemplateName());
13108 if (!CTN
.getAsVoidPointer())
13110 SmallVector
<TemplateArgument
, 8> Args
;
13111 if (getCommonTemplateArguments(Ctx
, Args
, TX
->template_arguments(),
13112 TY
->template_arguments()))
13114 return Ctx
.getTemplateSpecializationType(CTN
, Args
,
13115 Ctx
.getQualifiedType(Underlying
));
13117 case Type::Typedef
: {
13118 const auto *TX
= cast
<TypedefType
>(X
), *TY
= cast
<TypedefType
>(Y
);
13119 const TypedefNameDecl
*CD
= ::getCommonDecl(TX
->getDecl(), TY
->getDecl());
13122 return Ctx
.getTypedefType(CD
, Ctx
.getQualifiedType(Underlying
));
  case Type::TypeOf: {
    // For the common sugar between two typeof expressions, where one is
    // potentially a typeof_unqual and the other is not, unify to the qualified
    // kind, as that retains the most information along with the type. Only
    // return a typeof_unqual type when both types are typeof_unqual.
    TypeOfKind Kind = TypeOfKind::Qualified;
    if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() &&
        cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified)
      Kind = TypeOfKind::Unqualified;
    return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind);
  }
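  // Illustration for the TypeOf case above (not part of the original source):
  // given 'const int ci;', unifying 'typeof(ci)' with 'typeof_unqual(ci)'
  // keeps Kind == TypeOfKind::Qualified; only two typeof_unqual operands
  // yield an unqualified result.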
  case Type::TypeOfExpr:
    return QualType();

  case Type::UnaryTransform: {
    const auto *UX = cast<UnaryTransformType>(X),
               *UY = cast<UnaryTransformType>(Y);
    UnaryTransformType::UTTKind KX = UX->getUTTKind();
    if (KX != UY->getUTTKind())
      return QualType();
    QualType BX = UX->getBaseType(), BY = UY->getBaseType();
    if (!Ctx.hasSameType(BX, BY))
      return QualType();
    // FIXME: It's inefficient to have to unify the base types.
    return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY),
                                     Ctx.getQualifiedType(Underlying), KX);
  }
  case Type::Using: {
    const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y);
    const UsingShadowDecl *CD =
        ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl());
    if (!CD)
      return QualType();
    return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying));
  }
  }
  llvm_unreachable("Unhandled Type Class");
}
static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
  SmallVector<SplitQualType, 8> R;
  while (true) {
    QTotal.addConsistentQualifiers(T.Quals);
    QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
    if (NT == QualType(T.Ty, 0))
      break;
    R.push_back(T);
    T = NT.split();
  }
  return R;
}
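// Illustrative sketch (not part of the original source): for
//   typedef int Int;
// and T == 'const Int', the loop above peels the TypedefType node into R,
// accumulates 'const' into QTotal, and leaves T at the underlying 'int'
// built-in node, which has no further single-step desugaring.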
QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
                                          bool Unqualified) {
  assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
  if (X == Y)
    return X;
  if (!Unqualified) {
    if (X.isCanonical())
      return X;
    if (Y.isCanonical())
      return Y;
  }

  SplitQualType SX = X.split(), SY = Y.split();
  Qualifiers QX, QY;
  // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
  // until we reach their underlying "canonical nodes". Note these are not
  // necessarily canonical types, as they may still have sugared properties.
  // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
  auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY);
  if (SX.Ty != SY.Ty) {
    // The canonical nodes differ. Build a common canonical node out of the
    // two, unifying their sugar. This may recurse back here.
    SX.Ty =
        ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr();
  } else {
    // The canonical nodes were identical: We may have desugared too much.
    // Add any common sugar back in.
    while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
      QX -= SX.Quals;
      QY -= SY.Quals;
      SX = Xs.pop_back_val();
      SY = Ys.pop_back_val();
    }
  }
  if (!Unqualified)
    QX = Qualifiers::removeCommonQualifiers(QX, QY);
  else
    QX = Qualifiers();

  // Even though the remaining sugar nodes in Xs and Ys differ, some may be
  // related. Walk up these nodes, unifying them and adding the result.
  while (!Xs.empty() && !Ys.empty()) {
    auto Underlying = SplitQualType(
        SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals));
    SX = Xs.pop_back_val();
    SY = Ys.pop_back_val();
    SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying)
                .getTypePtrOrNull();
    // Stop at the first pair which is unrelated.
    if (!SX.Ty) {
      SX.Ty = Underlying.Ty;
      break;
    }
    QX -= Underlying.Quals;
  }

  // Add back the missing accumulated qualifiers, which were stripped off
  // with the sugar nodes we could not unify.
  QualType R = getQualifiedType(SX.Ty, QX);
  assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
  return R;
}
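// Illustrative sketch (not part of the original source): given
//   typedef int A;
//   typedef int B;
// getCommonSugaredType(A, B) desugars both sides to the shared 'int' node and,
// because the two TypedefType sugar nodes refer to unrelated declarations,
// returns plain 'int'. With identical arguments, e.g.
// getCommonSugaredType(A, A), the typedef sugar is preserved and 'A' is
// returned.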
QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isSaturatedFixedPointType()) return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
    default:
      llvm_unreachable("Not a fixed point type!");
    case BuiltinType::ShortAccum:
      return SatShortAccumTy;
    case BuiltinType::Accum:
      return SatAccumTy;
    case BuiltinType::LongAccum:
      return SatLongAccumTy;
    case BuiltinType::UShortAccum:
      return SatUnsignedShortAccumTy;
    case BuiltinType::UAccum:
      return SatUnsignedAccumTy;
    case BuiltinType::ULongAccum:
      return SatUnsignedLongAccumTy;
    case BuiltinType::ShortFract:
      return SatShortFractTy;
    case BuiltinType::Fract:
      return SatFractTy;
    case BuiltinType::LongFract:
      return SatLongFractTy;
    case BuiltinType::UShortFract:
      return SatUnsignedShortFractTy;
    case BuiltinType::UFract:
      return SatUnsignedFractTy;
    case BuiltinType::ULongFract:
      return SatUnsignedLongFractTy;
  }
}
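// Illustrative sketch (not part of the original source): for a declaration
//   short _Accum a;
// getCorrespondingSaturatedType(a's type) yields '_Sat short _Accum'
// (SatShortAccumTy); already-saturated inputs are returned unchanged.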
LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
  if (LangOpts.OpenCL)
    return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);

  if (LangOpts.CUDAIsDevice)
    return getTargetInfo().getCUDABuiltinAddressSpace(AS);

  return getLangASFromTargetAS(AS);
}
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
        const clang::ASTContext &Ctx, Decl *Value);
unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
    default:
      llvm_unreachable("Not a fixed point type!");
    case BuiltinType::ShortAccum:
    case BuiltinType::SatShortAccum:
      return Target.getShortAccumScale();
    case BuiltinType::Accum:
    case BuiltinType::SatAccum:
      return Target.getAccumScale();
    case BuiltinType::LongAccum:
    case BuiltinType::SatLongAccum:
      return Target.getLongAccumScale();
    case BuiltinType::UShortAccum:
    case BuiltinType::SatUShortAccum:
      return Target.getUnsignedShortAccumScale();
    case BuiltinType::UAccum:
    case BuiltinType::SatUAccum:
      return Target.getUnsignedAccumScale();
    case BuiltinType::ULongAccum:
    case BuiltinType::SatULongAccum:
      return Target.getUnsignedLongAccumScale();
    case BuiltinType::ShortFract:
    case BuiltinType::SatShortFract:
      return Target.getShortFractScale();
    case BuiltinType::Fract:
    case BuiltinType::SatFract:
      return Target.getFractScale();
    case BuiltinType::LongFract:
    case BuiltinType::SatLongFract:
      return Target.getLongFractScale();
    case BuiltinType::UShortFract:
    case BuiltinType::SatUShortFract:
      return Target.getUnsignedShortFractScale();
    case BuiltinType::UFract:
    case BuiltinType::SatUFract:
      return Target.getUnsignedFractScale();
    case BuiltinType::ULongFract:
    case BuiltinType::SatULongFract:
      return Target.getUnsignedLongFractScale();
  }
}
unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
    default:
      llvm_unreachable("Not a fixed point type!");
    case BuiltinType::ShortAccum:
    case BuiltinType::SatShortAccum:
      return Target.getShortAccumIBits();
    case BuiltinType::Accum:
    case BuiltinType::SatAccum:
      return Target.getAccumIBits();
    case BuiltinType::LongAccum:
    case BuiltinType::SatLongAccum:
      return Target.getLongAccumIBits();
    case BuiltinType::UShortAccum:
    case BuiltinType::SatUShortAccum:
      return Target.getUnsignedShortAccumIBits();
    case BuiltinType::UAccum:
    case BuiltinType::SatUAccum:
      return Target.getUnsignedAccumIBits();
    case BuiltinType::ULongAccum:
    case BuiltinType::SatULongAccum:
      return Target.getUnsignedLongAccumIBits();
    case BuiltinType::ShortFract:
    case BuiltinType::SatShortFract:
    case BuiltinType::Fract:
    case BuiltinType::SatFract:
    case BuiltinType::LongFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::UFract:
    case BuiltinType::SatUFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatULongFract:
      // Fract types have no integral bits.
      return 0;
  }
}
llvm::FixedPointSemantics
ASTContext::getFixedPointSemantics(QualType Ty) const {
  assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
         "Can only get the fixed point semantics for a "
         "fixed point or integer type.");
  if (Ty->isIntegerType())
    return llvm::FixedPointSemantics::GetIntegerSemantics(
        getIntWidth(Ty), Ty->isSignedIntegerType());

  bool isSigned = Ty->isSignedFixedPointType();
  return llvm::FixedPointSemantics(
      static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
      Ty->isSaturatedFixedPointType(),
      !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
}
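// Illustrative sketch (not part of the original source; the concrete numbers
// are target-dependent): on a target where 'unsigned short _Accum' is 16 bits
// wide with a scale of 8 and padded unsigned fixed-point types, the call above
// produces FixedPointSemantics(/*Width=*/16, /*Scale=*/8, /*IsSigned=*/false,
// /*IsSaturated=*/false, /*HasUnsignedPadding=*/true).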
llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
}

llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
}
QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
  assert(Ty->isUnsignedFixedPointType() &&
         "Expected unsigned fixed point type");

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned fixed point type");
  }
}
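// Illustrative sketch (not part of the original source): for
//   _Sat unsigned short _Fract f;
// getCorrespondingSignedFixedPointType(f's type) yields '_Sat short _Fract'
// (SatShortFractTy); saturation and rank are preserved, only the signedness
// changes.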
std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs(
    const TargetVersionAttr *TV) const {
  assert(TV != nullptr);
  llvm::SmallVector<StringRef, 8> Feats;
  std::vector<std::string> ResFeats;
  TV->getFeatures(Feats);
  for (auto &Feature : Feats)
    if (Target->validateCpuSupports(Feature.str()))
      // Use '?' to mark features that came from TargetVersion.
      ResFeats.push_back("?" + Feature.str());
  return ResFeats;
}
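// Illustrative sketch (not part of the original source): assuming an AArch64
// target that recognizes both feature names, a function declared with
//   __attribute__((target_version("sve2+bf16")))
// would produce {"?sve2", "?bf16"}; any name that validateCpuSupports rejects
// is silently dropped.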
ParsedTargetAttr
ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
  assert(TD != nullptr);
  ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr());

  llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) {
    return !Target->isValidFeatureName(StringRef{Feat}.substr(1));
  });

  return ParsedAttr;
}
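// Illustrative sketch (not part of the original source): on x86, a function
// declared with
//   __attribute__((target("avx2,bogus-feature")))
// parses to the feature list {"+avx2", "+bogus-feature"}; the erase_if above
// drops "+bogus-feature" because "bogus-feature" is not a valid feature name,
// leaving only "+avx2" in ParsedAttr.Features.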
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       const FunctionDecl *FD) const {
  if (FD)
    getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD));
  else
    Target->initFeatureMap(FeatureMap, getDiagnostics(),
                           Target->getTargetOpts().CPU,
                           Target->getTargetOpts().Features);
}
// Fills in the supplied string map with the set of target features for the
// passed in function.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function, so the
    // function's features can override them.
    ParsedAttr.Features.insert(
        ParsedAttr.Features.begin(),
        Target->getTargetOpts().FeaturesAsWritten.begin(),
        Target->getTargetOpts().FeaturesAsWritten.end());

    if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll
    // use the passed in features (FeaturesAsWritten) along with the new ones
    // from the function.
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    std::vector<std::string> Features;
    StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
    if (Target->getTriple().isAArch64()) {
      // TargetClones for AArch64.
      if (VersionStr != "default") {
        SmallVector<StringRef, 1> VersionFeatures;
        VersionStr.split(VersionFeatures, "+");
        for (auto &VFeature : VersionFeatures) {
          VFeature = VFeature.trim();
          // Use '?' to mark features that came from AArch64 TargetClones.
          Features.push_back((StringRef{"?"} + VFeature).str());
        }
      }
      Features.insert(Features.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.end());
    } else {
      if (VersionStr.startswith("arch="))
        TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back((StringRef{"+"} + VersionStr).str());
    }
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV);
    Feats.insert(Feats.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats);
  } else {
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}
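// Illustrative sketch (not part of the original source): assuming an x86
// translation unit compiled with -mavx2 and a function declared as
//   __attribute__((target("arch=skylake,no-avx2"))) void f();
// the feature map for f is seeded from the "skylake" CPU, then the
// command-line features ("+avx2") are applied, and finally the attribute's own
// features ("-avx2") override them, so FeatureMap["avx2"] ends up false.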
OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}
const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}
bool ASTContext::mayExternalize(const Decl *D) const {
  bool IsInternalVar =
      isa<VarDecl>(D) &&
      basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: managed variables need to be externalized since they are
  // declarations in IR and therefore cannot have internal linkage. Kernels in
  // an anonymous namespace need to be externalized to avoid duplicate symbols.
  return (IsInternalVar &&
          (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
         (D->hasAttr<CUDAGlobalAttr>() &&
          basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) ==
              GVA_Internal);
}
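// Illustrative sketch (not part of the original source): a kernel defined as
//   namespace { __global__ void kern() {} }
// has internal linkage, so mayExternalize() returns true for it and the
// compiler may give it an externally visible name to avoid duplicate-symbol
// problems across translation units.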
bool ASTContext::shouldExternalize(const Decl *D) const {
  return mayExternalize(D) &&
         (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
          CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
}
StringRef ASTContext::getCUIDHash() const {
  if (!CUIDHash.empty())
    return CUIDHash;
  if (LangOpts.CUID.empty())
    return StringRef();
  CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);