1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This provides C++ code generation targeting the Itanium C++ ABI. The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
12 // https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 // https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
18 //===----------------------------------------------------------------------===//
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
41 using namespace clang
;
42 using namespace CodeGen
;
45 class ItaniumCXXABI
: public CodeGen::CGCXXABI
{
46 /// VTables - All the vtables which have been defined.
47 llvm::DenseMap
<const CXXRecordDecl
*, llvm::GlobalVariable
*> VTables
;
49 /// All the thread wrapper functions that have been used.
50 llvm::SmallVector
<std::pair
<const VarDecl
*, llvm::Function
*>, 8>
54 bool UseARMMethodPtrABI
;
55 bool UseARMGuardVarABI
;
56 bool Use32BitVTableOffsetABI
;
58 ItaniumMangleContext
&getMangleContext() {
59 return cast
<ItaniumMangleContext
>(CodeGen::CGCXXABI::getMangleContext());
63 ItaniumCXXABI(CodeGen::CodeGenModule
&CGM
,
64 bool UseARMMethodPtrABI
= false,
65 bool UseARMGuardVarABI
= false) :
66 CGCXXABI(CGM
), UseARMMethodPtrABI(UseARMMethodPtrABI
),
67 UseARMGuardVarABI(UseARMGuardVarABI
),
68 Use32BitVTableOffsetABI(false) { }
70 bool classifyReturnType(CGFunctionInfo
&FI
) const override
;
72 RecordArgABI
getRecordArgABI(const CXXRecordDecl
*RD
) const override
{
73 // If C++ prohibits us from making a copy, pass by address.
74 if (!RD
->canPassInRegisters())
79 bool isThisCompleteObject(GlobalDecl GD
) const override
{
80 // The Itanium ABI has separate complete-object vs. base-object
81 // variants of both constructors and destructors.
82 if (isa
<CXXDestructorDecl
>(GD
.getDecl())) {
83 switch (GD
.getDtorType()) {
92 llvm_unreachable("emitting dtor comdat as function?");
94 llvm_unreachable("bad dtor kind");
96 if (isa
<CXXConstructorDecl
>(GD
.getDecl())) {
97 switch (GD
.getCtorType()) {
104 case Ctor_CopyingClosure
:
105 case Ctor_DefaultClosure
:
106 llvm_unreachable("closure ctors in Itanium ABI?");
109 llvm_unreachable("emitting ctor comdat as function?");
111 llvm_unreachable("bad dtor kind");
118 bool isZeroInitializable(const MemberPointerType
*MPT
) override
;
120 llvm::Type
*ConvertMemberPointerType(const MemberPointerType
*MPT
) override
;
123 EmitLoadOfMemberFunctionPointer(CodeGenFunction
&CGF
,
126 llvm::Value
*&ThisPtrForCall
,
127 llvm::Value
*MemFnPtr
,
128 const MemberPointerType
*MPT
) override
;
131 EmitMemberDataPointerAddress(CodeGenFunction
&CGF
, const Expr
*E
,
134 const MemberPointerType
*MPT
) override
;
136 llvm::Value
*EmitMemberPointerConversion(CodeGenFunction
&CGF
,
138 llvm::Value
*Src
) override
;
139 llvm::Constant
*EmitMemberPointerConversion(const CastExpr
*E
,
140 llvm::Constant
*Src
) override
;
142 llvm::Constant
*EmitNullMemberPointer(const MemberPointerType
*MPT
) override
;
144 llvm::Constant
*EmitMemberFunctionPointer(const CXXMethodDecl
*MD
) override
;
145 llvm::Constant
*EmitMemberDataPointer(const MemberPointerType
*MPT
,
146 CharUnits offset
) override
;
147 llvm::Constant
*EmitMemberPointer(const APValue
&MP
, QualType MPT
) override
;
148 llvm::Constant
*BuildMemberPointer(const CXXMethodDecl
*MD
,
149 CharUnits ThisAdjustment
);
151 llvm::Value
*EmitMemberPointerComparison(CodeGenFunction
&CGF
,
152 llvm::Value
*L
, llvm::Value
*R
,
153 const MemberPointerType
*MPT
,
154 bool Inequality
) override
;
156 llvm::Value
*EmitMemberPointerIsNotNull(CodeGenFunction
&CGF
,
158 const MemberPointerType
*MPT
) override
;
160 void emitVirtualObjectDelete(CodeGenFunction
&CGF
, const CXXDeleteExpr
*DE
,
161 Address Ptr
, QualType ElementType
,
162 const CXXDestructorDecl
*Dtor
) override
;
164 void emitRethrow(CodeGenFunction
&CGF
, bool isNoReturn
) override
;
165 void emitThrow(CodeGenFunction
&CGF
, const CXXThrowExpr
*E
) override
;
167 void emitBeginCatch(CodeGenFunction
&CGF
, const CXXCatchStmt
*C
) override
;
170 emitTerminateForUnexpectedException(CodeGenFunction
&CGF
,
171 llvm::Value
*Exn
) override
;
173 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl
*RD
);
174 llvm::Constant
*getAddrOfRTTIDescriptor(QualType Ty
) override
;
176 getAddrOfCXXCatchHandlerType(QualType Ty
,
177 QualType CatchHandlerType
) override
{
178 return CatchTypeInfo
{getAddrOfRTTIDescriptor(Ty
), 0};
181 bool shouldTypeidBeNullChecked(bool IsDeref
, QualType SrcRecordTy
) override
;
182 void EmitBadTypeidCall(CodeGenFunction
&CGF
) override
;
183 llvm::Value
*EmitTypeid(CodeGenFunction
&CGF
, QualType SrcRecordTy
,
185 llvm::Type
*StdTypeInfoPtrTy
) override
;
187 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr
,
188 QualType SrcRecordTy
) override
;
190 /// Determine whether we know that all instances of type RecordTy will have
191 /// the same vtable pointer values, that is distinct from all other vtable
192 /// pointers. While this is required by the Itanium ABI, it doesn't happen in
193 /// practice in some cases due to language extensions.
194 bool hasUniqueVTablePointer(QualType RecordTy
) {
195 const CXXRecordDecl
*RD
= RecordTy
->getAsCXXRecordDecl();
197 // Under -fapple-kext, multiple definitions of the same vtable may be
199 if (!CGM
.getCodeGenOpts().AssumeUniqueVTables
||
200 getContext().getLangOpts().AppleKext
)
203 // If the type_info* would be null, the vtable might be merged with that of
205 if (!CGM
.shouldEmitRTTI())
208 // If there's only one definition of the vtable in the program, it has a
210 if (!llvm::GlobalValue::isWeakForLinker(CGM
.getVTableLinkage(RD
)))
213 // Even if there are multiple definitions of the vtable, they are required
214 // by the ABI to use the same symbol name, so should be merged at load
215 // time. However, if the class has hidden visibility, there can be
216 // different versions of the class in different modules, and the ABI
217 // library might treat them as being the same.
218 if (CGM
.GetLLVMVisibility(RD
->getVisibility()) !=
219 llvm::GlobalValue::DefaultVisibility
)
225 bool shouldEmitExactDynamicCast(QualType DestRecordTy
) override
{
226 return hasUniqueVTablePointer(DestRecordTy
);
229 llvm::Value
*emitDynamicCastCall(CodeGenFunction
&CGF
, Address Value
,
230 QualType SrcRecordTy
, QualType DestTy
,
231 QualType DestRecordTy
,
232 llvm::BasicBlock
*CastEnd
) override
;
234 llvm::Value
*emitExactDynamicCast(CodeGenFunction
&CGF
, Address ThisAddr
,
235 QualType SrcRecordTy
, QualType DestTy
,
236 QualType DestRecordTy
,
237 llvm::BasicBlock
*CastSuccess
,
238 llvm::BasicBlock
*CastFail
) override
;
240 llvm::Value
*emitDynamicCastToVoid(CodeGenFunction
&CGF
, Address Value
,
241 QualType SrcRecordTy
) override
;
243 bool EmitBadCastCall(CodeGenFunction
&CGF
) override
;
246 GetVirtualBaseClassOffset(CodeGenFunction
&CGF
, Address This
,
247 const CXXRecordDecl
*ClassDecl
,
248 const CXXRecordDecl
*BaseClassDecl
) override
;
250 void EmitCXXConstructors(const CXXConstructorDecl
*D
) override
;
252 AddedStructorArgCounts
253 buildStructorSignature(GlobalDecl GD
,
254 SmallVectorImpl
<CanQualType
> &ArgTys
) override
;
256 bool useThunkForDtorVariant(const CXXDestructorDecl
*Dtor
,
257 CXXDtorType DT
) const override
{
258 // Itanium does not emit any destructor variant as an inline thunk.
259 // Delegating may occur as an optimization, but all variants are either
260 // emitted with external linkage or as linkonce if they are inline and used.
264 void EmitCXXDestructors(const CXXDestructorDecl
*D
) override
;
266 void addImplicitStructorParams(CodeGenFunction
&CGF
, QualType
&ResTy
,
267 FunctionArgList
&Params
) override
;
269 void EmitInstanceFunctionProlog(CodeGenFunction
&CGF
) override
;
271 AddedStructorArgs
getImplicitConstructorArgs(CodeGenFunction
&CGF
,
272 const CXXConstructorDecl
*D
,
275 bool Delegating
) override
;
277 llvm::Value
*getCXXDestructorImplicitParam(CodeGenFunction
&CGF
,
278 const CXXDestructorDecl
*DD
,
281 bool Delegating
) override
;
283 void EmitDestructorCall(CodeGenFunction
&CGF
, const CXXDestructorDecl
*DD
,
284 CXXDtorType Type
, bool ForVirtualBase
,
285 bool Delegating
, Address This
,
286 QualType ThisTy
) override
;
288 void emitVTableDefinitions(CodeGenVTables
&CGVT
,
289 const CXXRecordDecl
*RD
) override
;
291 bool isVirtualOffsetNeededForVTableField(CodeGenFunction
&CGF
,
292 CodeGenFunction::VPtr Vptr
) override
;
294 bool doStructorsInitializeVPtrs(const CXXRecordDecl
*VTableClass
) override
{
299 getVTableAddressPoint(BaseSubobject Base
,
300 const CXXRecordDecl
*VTableClass
) override
;
302 llvm::Value
*getVTableAddressPointInStructor(
303 CodeGenFunction
&CGF
, const CXXRecordDecl
*VTableClass
,
304 BaseSubobject Base
, const CXXRecordDecl
*NearestVBase
) override
;
306 llvm::Value
*getVTableAddressPointInStructorWithVTT(
307 CodeGenFunction
&CGF
, const CXXRecordDecl
*VTableClass
,
308 BaseSubobject Base
, const CXXRecordDecl
*NearestVBase
);
311 getVTableAddressPointForConstExpr(BaseSubobject Base
,
312 const CXXRecordDecl
*VTableClass
) override
;
314 llvm::GlobalVariable
*getAddrOfVTable(const CXXRecordDecl
*RD
,
315 CharUnits VPtrOffset
) override
;
317 CGCallee
getVirtualFunctionPointer(CodeGenFunction
&CGF
, GlobalDecl GD
,
318 Address This
, llvm::Type
*Ty
,
319 SourceLocation Loc
) override
;
321 llvm::Value
*EmitVirtualDestructorCall(CodeGenFunction
&CGF
,
322 const CXXDestructorDecl
*Dtor
,
323 CXXDtorType DtorType
, Address This
,
324 DeleteOrMemberCallExpr E
) override
;
326 void emitVirtualInheritanceTables(const CXXRecordDecl
*RD
) override
;
328 bool canSpeculativelyEmitVTable(const CXXRecordDecl
*RD
) const override
;
329 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl
*RD
) const;
331 void setThunkLinkage(llvm::Function
*Thunk
, bool ForVTable
, GlobalDecl GD
,
332 bool ReturnAdjustment
) override
{
333 // Allow inlining of thunks by emitting them with available_externally
334 // linkage together with vtables when needed.
335 if (ForVTable
&& !Thunk
->hasLocalLinkage())
336 Thunk
->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage
);
337 CGM
.setGVProperties(Thunk
, GD
);
340 bool exportThunk() override
{ return true; }
342 llvm::Value
*performThisAdjustment(CodeGenFunction
&CGF
, Address This
,
343 const ThisAdjustment
&TA
) override
;
345 llvm::Value
*performReturnAdjustment(CodeGenFunction
&CGF
, Address Ret
,
346 const ReturnAdjustment
&RA
) override
;
348 size_t getSrcArgforCopyCtor(const CXXConstructorDecl
*,
349 FunctionArgList
&Args
) const override
{
350 assert(!Args
.empty() && "expected the arglist to not be empty!");
351 return Args
.size() - 1;
354 StringRef
GetPureVirtualCallName() override
{ return "__cxa_pure_virtual"; }
355 StringRef
GetDeletedVirtualCallName() override
356 { return "__cxa_deleted_virtual"; }
358 CharUnits
getArrayCookieSizeImpl(QualType elementType
) override
;
359 Address
InitializeArrayCookie(CodeGenFunction
&CGF
,
361 llvm::Value
*NumElements
,
362 const CXXNewExpr
*expr
,
363 QualType ElementType
) override
;
364 llvm::Value
*readArrayCookieImpl(CodeGenFunction
&CGF
,
366 CharUnits cookieSize
) override
;
368 void EmitGuardedInit(CodeGenFunction
&CGF
, const VarDecl
&D
,
369 llvm::GlobalVariable
*DeclPtr
,
370 bool PerformInit
) override
;
371 void registerGlobalDtor(CodeGenFunction
&CGF
, const VarDecl
&D
,
372 llvm::FunctionCallee dtor
,
373 llvm::Constant
*addr
) override
;
375 llvm::Function
*getOrCreateThreadLocalWrapper(const VarDecl
*VD
,
377 void EmitThreadLocalInitFuncs(
379 ArrayRef
<const VarDecl
*> CXXThreadLocals
,
380 ArrayRef
<llvm::Function
*> CXXThreadLocalInits
,
381 ArrayRef
<const VarDecl
*> CXXThreadLocalInitVars
) override
;
383 bool usesThreadWrapperFunction(const VarDecl
*VD
) const override
{
384 return !isEmittedWithConstantInitializer(VD
) ||
385 mayNeedDestruction(VD
);
387 LValue
EmitThreadLocalVarDeclLValue(CodeGenFunction
&CGF
, const VarDecl
*VD
,
388 QualType LValType
) override
;
390 bool NeedsVTTParameter(GlobalDecl GD
) override
;
392 /**************************** RTTI Uniqueness ******************************/
395 /// Returns true if the ABI requires RTTI type_info objects to be unique
396 /// across a program.
397 virtual bool shouldRTTIBeUnique() const { return true; }
400 /// What sort of unique-RTTI behavior should we use?
401 enum RTTIUniquenessKind
{
402 /// We are guaranteeing, or need to guarantee, that the RTTI string
406 /// We are not guaranteeing uniqueness for the RTTI string, so we
407 /// can demote to hidden visibility but must use string comparisons.
410 /// We are not guaranteeing uniqueness for the RTTI string, so we
411 /// have to use string comparisons, but we also have to emit it with
412 /// non-hidden visibility.
416 /// Return the required visibility status for the given type and linkage in
419 classifyRTTIUniqueness(QualType CanTy
,
420 llvm::GlobalValue::LinkageTypes Linkage
) const;
421 friend class ItaniumRTTIBuilder
;
423 void emitCXXStructor(GlobalDecl GD
) override
;
425 std::pair
<llvm::Value
*, const CXXRecordDecl
*>
426 LoadVTablePtr(CodeGenFunction
&CGF
, Address This
,
427 const CXXRecordDecl
*RD
) override
;
430 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl
*RD
) const {
431 const auto &VtableLayout
=
432 CGM
.getItaniumVTableContext().getVTableLayout(RD
);
434 for (const auto &VtableComponent
: VtableLayout
.vtable_components()) {
436 if (!VtableComponent
.isUsedFunctionPointerKind())
439 const CXXMethodDecl
*Method
= VtableComponent
.getFunctionDecl();
440 if (!Method
->getCanonicalDecl()->isInlined())
443 StringRef Name
= CGM
.getMangledName(VtableComponent
.getGlobalDecl());
444 auto *Entry
= CGM
.GetGlobalValue(Name
);
445 // This checks if virtual inline function has already been emitted.
446 // Note that it is possible that this inline function would be emitted
447 // after trying to emit vtable speculatively. Because of this we do
448 // an extra pass after emitting all deferred vtables to find and emit
449 // these vtables opportunistically.
450 if (!Entry
|| Entry
->isDeclaration())
456 bool isVTableHidden(const CXXRecordDecl
*RD
) const {
457 const auto &VtableLayout
=
458 CGM
.getItaniumVTableContext().getVTableLayout(RD
);
460 for (const auto &VtableComponent
: VtableLayout
.vtable_components()) {
461 if (VtableComponent
.isRTTIKind()) {
462 const CXXRecordDecl
*RTTIDecl
= VtableComponent
.getRTTIDecl();
463 if (RTTIDecl
->getVisibility() == Visibility::HiddenVisibility
)
465 } else if (VtableComponent
.isUsedFunctionPointerKind()) {
466 const CXXMethodDecl
*Method
= VtableComponent
.getFunctionDecl();
467 if (Method
->getVisibility() == Visibility::HiddenVisibility
&&
468 !Method
->isDefined())
476 class ARMCXXABI
: public ItaniumCXXABI
{
478 ARMCXXABI(CodeGen::CodeGenModule
&CGM
) :
479 ItaniumCXXABI(CGM
, /*UseARMMethodPtrABI=*/true,
480 /*UseARMGuardVarABI=*/true) {}
482 bool constructorsAndDestructorsReturnThis() const override
{ return true; }
484 void EmitReturnFromThunk(CodeGenFunction
&CGF
, RValue RV
,
485 QualType ResTy
) override
;
487 CharUnits
getArrayCookieSizeImpl(QualType elementType
) override
;
488 Address
InitializeArrayCookie(CodeGenFunction
&CGF
,
490 llvm::Value
*NumElements
,
491 const CXXNewExpr
*expr
,
492 QualType ElementType
) override
;
493 llvm::Value
*readArrayCookieImpl(CodeGenFunction
&CGF
, Address allocPtr
,
494 CharUnits cookieSize
) override
;
497 class AppleARM64CXXABI
: public ARMCXXABI
{
499 AppleARM64CXXABI(CodeGen::CodeGenModule
&CGM
) : ARMCXXABI(CGM
) {
500 Use32BitVTableOffsetABI
= true;
503 // ARM64 libraries are prepared for non-unique RTTI.
504 bool shouldRTTIBeUnique() const override
{ return false; }
507 class FuchsiaCXXABI final
: public ItaniumCXXABI
{
509 explicit FuchsiaCXXABI(CodeGen::CodeGenModule
&CGM
)
510 : ItaniumCXXABI(CGM
) {}
513 bool constructorsAndDestructorsReturnThis() const override
{ return true; }
516 class WebAssemblyCXXABI final
: public ItaniumCXXABI
{
518 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule
&CGM
)
519 : ItaniumCXXABI(CGM
, /*UseARMMethodPtrABI=*/true,
520 /*UseARMGuardVarABI=*/true) {}
521 void emitBeginCatch(CodeGenFunction
&CGF
, const CXXCatchStmt
*C
) override
;
523 emitTerminateForUnexpectedException(CodeGenFunction
&CGF
,
524 llvm::Value
*Exn
) override
;
527 bool constructorsAndDestructorsReturnThis() const override
{ return true; }
528 bool canCallMismatchedFunctionType() const override
{ return false; }
531 class XLCXXABI final
: public ItaniumCXXABI
{
533 explicit XLCXXABI(CodeGen::CodeGenModule
&CGM
)
534 : ItaniumCXXABI(CGM
) {}
536 void registerGlobalDtor(CodeGenFunction
&CGF
, const VarDecl
&D
,
537 llvm::FunctionCallee dtor
,
538 llvm::Constant
*addr
) override
;
540 bool useSinitAndSterm() const override
{ return true; }
543 void emitCXXStermFinalizer(const VarDecl
&D
, llvm::Function
*dtorStub
,
544 llvm::Constant
*addr
);
548 CodeGen::CGCXXABI
*CodeGen::CreateItaniumCXXABI(CodeGenModule
&CGM
) {
549 switch (CGM
.getContext().getCXXABIKind()) {
550 // For IR-generation purposes, there's no significant difference
551 // between the ARM and iOS ABIs.
552 case TargetCXXABI::GenericARM
:
553 case TargetCXXABI::iOS
:
554 case TargetCXXABI::WatchOS
:
555 return new ARMCXXABI(CGM
);
557 case TargetCXXABI::AppleARM64
:
558 return new AppleARM64CXXABI(CGM
);
560 case TargetCXXABI::Fuchsia
:
561 return new FuchsiaCXXABI(CGM
);
563 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
564 // include the other 32-bit ARM oddities: constructor/destructor return values
565 // and array cookies.
566 case TargetCXXABI::GenericAArch64
:
567 return new ItaniumCXXABI(CGM
, /*UseARMMethodPtrABI=*/true,
568 /*UseARMGuardVarABI=*/true);
570 case TargetCXXABI::GenericMIPS
:
571 return new ItaniumCXXABI(CGM
, /*UseARMMethodPtrABI=*/true);
573 case TargetCXXABI::WebAssembly
:
574 return new WebAssemblyCXXABI(CGM
);
576 case TargetCXXABI::XL
:
577 return new XLCXXABI(CGM
);
579 case TargetCXXABI::GenericItanium
:
580 if (CGM
.getContext().getTargetInfo().getTriple().getArch()
581 == llvm::Triple::le32
) {
582 // For PNaCl, use ARM-style method pointers so that PNaCl code
583 // does not assume anything about the alignment of function
585 return new ItaniumCXXABI(CGM
, /*UseARMMethodPtrABI=*/true);
587 return new ItaniumCXXABI(CGM
);
589 case TargetCXXABI::Microsoft
:
590 llvm_unreachable("Microsoft ABI is not Itanium-based");
592 llvm_unreachable("bad ABI kind");
596 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType
*MPT
) {
597 if (MPT
->isMemberDataPointer())
598 return CGM
.PtrDiffTy
;
599 return llvm::StructType::get(CGM
.PtrDiffTy
, CGM
.PtrDiffTy
);
602 /// In the Itanium and ARM ABIs, method pointers have the form:
603 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
605 /// In the Itanium ABI:
606 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
607 /// - the this-adjustment is (memptr.adj)
608 /// - the virtual offset is (memptr.ptr - 1)
611 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
612 /// - the this-adjustment is (memptr.adj >> 1)
613 /// - the virtual offset is (memptr.ptr)
614 /// ARM uses 'adj' for the virtual flag because Thumb functions
615 /// may be only single-byte aligned.
617 /// If the member is virtual, the adjusted 'this' pointer points
618 /// to a vtable pointer from which the virtual offset is applied.
620 /// If the member is non-virtual, memptr.ptr is the address of
621 /// the function to call.
622 CGCallee
ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
623 CodeGenFunction
&CGF
, const Expr
*E
, Address ThisAddr
,
624 llvm::Value
*&ThisPtrForCall
,
625 llvm::Value
*MemFnPtr
, const MemberPointerType
*MPT
) {
626 CGBuilderTy
&Builder
= CGF
.Builder
;
628 const FunctionProtoType
*FPT
=
629 MPT
->getPointeeType()->castAs
<FunctionProtoType
>();
631 cast
<CXXRecordDecl
>(MPT
->getClass()->castAs
<RecordType
>()->getDecl());
633 llvm::Constant
*ptrdiff_1
= llvm::ConstantInt::get(CGM
.PtrDiffTy
, 1);
635 llvm::BasicBlock
*FnVirtual
= CGF
.createBasicBlock("memptr.virtual");
636 llvm::BasicBlock
*FnNonVirtual
= CGF
.createBasicBlock("memptr.nonvirtual");
637 llvm::BasicBlock
*FnEnd
= CGF
.createBasicBlock("memptr.end");
639 // Extract memptr.adj, which is in the second field.
640 llvm::Value
*RawAdj
= Builder
.CreateExtractValue(MemFnPtr
, 1, "memptr.adj");
642 // Compute the true adjustment.
643 llvm::Value
*Adj
= RawAdj
;
644 if (UseARMMethodPtrABI
)
645 Adj
= Builder
.CreateAShr(Adj
, ptrdiff_1
, "memptr.adj.shifted");
647 // Apply the adjustment and cast back to the original struct type
649 llvm::Value
*This
= ThisAddr
.getPointer();
650 This
= Builder
.CreateInBoundsGEP(Builder
.getInt8Ty(), This
, Adj
);
651 ThisPtrForCall
= This
;
653 // Load the function pointer.
654 llvm::Value
*FnAsInt
= Builder
.CreateExtractValue(MemFnPtr
, 0, "memptr.ptr");
656 // If the LSB in the function pointer is 1, the function pointer points to
657 // a virtual function.
658 llvm::Value
*IsVirtual
;
659 if (UseARMMethodPtrABI
)
660 IsVirtual
= Builder
.CreateAnd(RawAdj
, ptrdiff_1
);
662 IsVirtual
= Builder
.CreateAnd(FnAsInt
, ptrdiff_1
);
663 IsVirtual
= Builder
.CreateIsNotNull(IsVirtual
, "memptr.isvirtual");
664 Builder
.CreateCondBr(IsVirtual
, FnVirtual
, FnNonVirtual
);
666 // In the virtual path, the adjustment left 'This' pointing to the
667 // vtable of the correct base subobject. The "function pointer" is an
668 // offset within the vtable (+1 for the virtual flag on non-ARM).
669 CGF
.EmitBlock(FnVirtual
);
671 // Cast the adjusted this to a pointer to vtable pointer and load.
672 llvm::Type
*VTableTy
= CGF
.CGM
.GlobalsInt8PtrTy
;
673 CharUnits VTablePtrAlign
=
674 CGF
.CGM
.getDynamicOffsetAlignment(ThisAddr
.getAlignment(), RD
,
675 CGF
.getPointerAlign());
676 llvm::Value
*VTable
= CGF
.GetVTablePtr(
677 Address(This
, ThisAddr
.getElementType(), VTablePtrAlign
), VTableTy
, RD
);
680 // On ARM64, to reserve extra space in virtual member function pointers,
681 // we only pay attention to the low 32 bits of the offset.
682 llvm::Value
*VTableOffset
= FnAsInt
;
683 if (!UseARMMethodPtrABI
)
684 VTableOffset
= Builder
.CreateSub(VTableOffset
, ptrdiff_1
);
685 if (Use32BitVTableOffsetABI
) {
686 VTableOffset
= Builder
.CreateTrunc(VTableOffset
, CGF
.Int32Ty
);
687 VTableOffset
= Builder
.CreateZExt(VTableOffset
, CGM
.PtrDiffTy
);
690 // Check the address of the function pointer if CFI on member function
691 // pointers is enabled.
692 llvm::Constant
*CheckSourceLocation
;
693 llvm::Constant
*CheckTypeDesc
;
694 bool ShouldEmitCFICheck
= CGF
.SanOpts
.has(SanitizerKind::CFIMFCall
) &&
695 CGM
.HasHiddenLTOVisibility(RD
);
696 bool ShouldEmitVFEInfo
= CGM
.getCodeGenOpts().VirtualFunctionElimination
&&
697 CGM
.HasHiddenLTOVisibility(RD
);
698 bool ShouldEmitWPDInfo
=
699 CGM
.getCodeGenOpts().WholeProgramVTables
&&
700 // Don't insert type tests if we are forcing public visibility.
701 !CGM
.AlwaysHasLTOVisibilityPublic(RD
);
702 llvm::Value
*VirtualFn
= nullptr;
705 CodeGenFunction::SanitizerScope
SanScope(&CGF
);
706 llvm::Value
*TypeId
= nullptr;
707 llvm::Value
*CheckResult
= nullptr;
709 if (ShouldEmitCFICheck
|| ShouldEmitVFEInfo
|| ShouldEmitWPDInfo
) {
710 // If doing CFI, VFE or WPD, we will need the metadata node to check
713 CGM
.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT
, 0));
714 TypeId
= llvm::MetadataAsValue::get(CGF
.getLLVMContext(), MD
);
717 if (ShouldEmitVFEInfo
) {
718 llvm::Value
*VFPAddr
=
719 Builder
.CreateGEP(CGF
.Int8Ty
, VTable
, VTableOffset
);
721 // If doing VFE, load from the vtable with a type.checked.load intrinsic
722 // call. Note that we use the GEP to calculate the address to load from
723 // and pass 0 as the offset to the intrinsic. This is because every
724 // vtable slot of the correct type is marked with matching metadata, and
725 // we know that the load must be from one of these slots.
726 llvm::Value
*CheckedLoad
= Builder
.CreateCall(
727 CGM
.getIntrinsic(llvm::Intrinsic::type_checked_load
),
728 {VFPAddr
, llvm::ConstantInt::get(CGM
.Int32Ty
, 0), TypeId
});
729 CheckResult
= Builder
.CreateExtractValue(CheckedLoad
, 1);
730 VirtualFn
= Builder
.CreateExtractValue(CheckedLoad
, 0);
732 // When not doing VFE, emit a normal load, as it allows more
733 // optimisations than type.checked.load.
734 if (ShouldEmitCFICheck
|| ShouldEmitWPDInfo
) {
735 llvm::Value
*VFPAddr
=
736 Builder
.CreateGEP(CGF
.Int8Ty
, VTable
, VTableOffset
);
737 llvm::Intrinsic::ID IID
= CGM
.HasHiddenLTOVisibility(RD
)
738 ? llvm::Intrinsic::type_test
739 : llvm::Intrinsic::public_type_test
;
742 Builder
.CreateCall(CGM
.getIntrinsic(IID
), {VFPAddr
, TypeId
});
745 if (CGM
.getItaniumVTableContext().isRelativeLayout()) {
746 VirtualFn
= CGF
.Builder
.CreateCall(
747 CGM
.getIntrinsic(llvm::Intrinsic::load_relative
,
748 {VTableOffset
->getType()}),
749 {VTable
, VTableOffset
});
751 llvm::Value
*VFPAddr
=
752 CGF
.Builder
.CreateGEP(CGF
.Int8Ty
, VTable
, VTableOffset
);
753 VirtualFn
= CGF
.Builder
.CreateAlignedLoad(
754 llvm::PointerType::getUnqual(CGF
.getLLVMContext()), VFPAddr
,
755 CGF
.getPointerAlign(), "memptr.virtualfn");
758 assert(VirtualFn
&& "Virtual fuction pointer not created!");
759 assert((!ShouldEmitCFICheck
|| !ShouldEmitVFEInfo
|| !ShouldEmitWPDInfo
||
761 "Check result required but not created!");
763 if (ShouldEmitCFICheck
) {
764 // If doing CFI, emit the check.
765 CheckSourceLocation
= CGF
.EmitCheckSourceLocation(E
->getBeginLoc());
766 CheckTypeDesc
= CGF
.EmitCheckTypeDescriptor(QualType(MPT
, 0));
767 llvm::Constant
*StaticData
[] = {
768 llvm::ConstantInt::get(CGF
.Int8Ty
, CodeGenFunction::CFITCK_VMFCall
),
773 if (CGM
.getCodeGenOpts().SanitizeTrap
.has(SanitizerKind::CFIMFCall
)) {
774 CGF
.EmitTrapCheck(CheckResult
, SanitizerHandler::CFICheckFail
);
776 llvm::Value
*AllVtables
= llvm::MetadataAsValue::get(
777 CGM
.getLLVMContext(),
778 llvm::MDString::get(CGM
.getLLVMContext(), "all-vtables"));
779 llvm::Value
*ValidVtable
= Builder
.CreateCall(
780 CGM
.getIntrinsic(llvm::Intrinsic::type_test
), {VTable
, AllVtables
});
781 CGF
.EmitCheck(std::make_pair(CheckResult
, SanitizerKind::CFIMFCall
),
782 SanitizerHandler::CFICheckFail
, StaticData
,
783 {VTable
, ValidVtable
});
786 FnVirtual
= Builder
.GetInsertBlock();
788 } // End of sanitizer scope
790 CGF
.EmitBranch(FnEnd
);
792 // In the non-virtual path, the function pointer is actually a
794 CGF
.EmitBlock(FnNonVirtual
);
795 llvm::Value
*NonVirtualFn
= Builder
.CreateIntToPtr(
796 FnAsInt
, llvm::PointerType::getUnqual(CGF
.getLLVMContext()),
797 "memptr.nonvirtualfn");
799 // Check the function pointer if CFI on member function pointers is enabled.
800 if (ShouldEmitCFICheck
) {
801 CXXRecordDecl
*RD
= MPT
->getClass()->getAsCXXRecordDecl();
802 if (RD
->hasDefinition()) {
803 CodeGenFunction::SanitizerScope
SanScope(&CGF
);
805 llvm::Constant
*StaticData
[] = {
806 llvm::ConstantInt::get(CGF
.Int8Ty
, CodeGenFunction::CFITCK_NVMFCall
),
811 llvm::Value
*Bit
= Builder
.getFalse();
812 for (const CXXRecordDecl
*Base
: CGM
.getMostBaseClasses(RD
)) {
813 llvm::Metadata
*MD
= CGM
.CreateMetadataIdentifierForType(
814 getContext().getMemberPointerType(
815 MPT
->getPointeeType(),
816 getContext().getRecordType(Base
).getTypePtr()));
817 llvm::Value
*TypeId
=
818 llvm::MetadataAsValue::get(CGF
.getLLVMContext(), MD
);
820 llvm::Value
*TypeTest
=
821 Builder
.CreateCall(CGM
.getIntrinsic(llvm::Intrinsic::type_test
),
822 {NonVirtualFn
, TypeId
});
823 Bit
= Builder
.CreateOr(Bit
, TypeTest
);
826 CGF
.EmitCheck(std::make_pair(Bit
, SanitizerKind::CFIMFCall
),
827 SanitizerHandler::CFICheckFail
, StaticData
,
828 {NonVirtualFn
, llvm::UndefValue::get(CGF
.IntPtrTy
)});
830 FnNonVirtual
= Builder
.GetInsertBlock();
835 CGF
.EmitBlock(FnEnd
);
836 llvm::PHINode
*CalleePtr
=
837 Builder
.CreatePHI(llvm::PointerType::getUnqual(CGF
.getLLVMContext()), 2);
838 CalleePtr
->addIncoming(VirtualFn
, FnVirtual
);
839 CalleePtr
->addIncoming(NonVirtualFn
, FnNonVirtual
);
841 CGCallee
Callee(FPT
, CalleePtr
);
845 /// Compute an l-value by applying the given pointer-to-member to a
847 llvm::Value
*ItaniumCXXABI::EmitMemberDataPointerAddress(
848 CodeGenFunction
&CGF
, const Expr
*E
, Address Base
, llvm::Value
*MemPtr
,
849 const MemberPointerType
*MPT
) {
850 assert(MemPtr
->getType() == CGM
.PtrDiffTy
);
852 CGBuilderTy
&Builder
= CGF
.Builder
;
854 // Apply the offset, which we assume is non-null.
855 return Builder
.CreateInBoundsGEP(CGF
.Int8Ty
, Base
.getPointer(), MemPtr
,
859 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
862 /// Bitcast conversions are always a no-op under Itanium.
864 /// Obligatory offset/adjustment diagram:
865 /// <-- offset --> <-- adjustment -->
866 /// |--------------------------|----------------------|--------------------|
867 /// ^Derived address point ^Base address point ^Member address point
869 /// So when converting a base member pointer to a derived member pointer,
870 /// we add the offset to the adjustment because the address point has
871 /// decreased; and conversely, when converting a derived MP to a base MP
872 /// we subtract the offset from the adjustment because the address point
875 /// The standard forbids (at compile time) conversion to and from
876 /// virtual bases, which is why we don't have to consider them here.
878 /// The standard forbids (at run time) casting a derived MP to a base
879 /// MP when the derived MP does not point to a member of the base.
880 /// This is why -1 is a reasonable choice for null data member
883 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction
&CGF
,
886 assert(E
->getCastKind() == CK_DerivedToBaseMemberPointer
||
887 E
->getCastKind() == CK_BaseToDerivedMemberPointer
||
888 E
->getCastKind() == CK_ReinterpretMemberPointer
);
890 // Under Itanium, reinterprets don't require any additional processing.
891 if (E
->getCastKind() == CK_ReinterpretMemberPointer
) return src
;
893 // Use constant emission if we can.
894 if (isa
<llvm::Constant
>(src
))
895 return EmitMemberPointerConversion(E
, cast
<llvm::Constant
>(src
));
897 llvm::Constant
*adj
= getMemberPointerAdjustment(E
);
898 if (!adj
) return src
;
900 CGBuilderTy
&Builder
= CGF
.Builder
;
901 bool isDerivedToBase
= (E
->getCastKind() == CK_DerivedToBaseMemberPointer
);
903 const MemberPointerType
*destTy
=
904 E
->getType()->castAs
<MemberPointerType
>();
906 // For member data pointers, this is just a matter of adding the
907 // offset if the source is non-null.
908 if (destTy
->isMemberDataPointer()) {
911 dst
= Builder
.CreateNSWSub(src
, adj
, "adj");
913 dst
= Builder
.CreateNSWAdd(src
, adj
, "adj");
916 llvm::Value
*null
= llvm::Constant::getAllOnesValue(src
->getType());
917 llvm::Value
*isNull
= Builder
.CreateICmpEQ(src
, null
, "memptr.isnull");
918 return Builder
.CreateSelect(isNull
, src
, dst
);
921 // The this-adjustment is left-shifted by 1 on ARM.
922 if (UseARMMethodPtrABI
) {
923 uint64_t offset
= cast
<llvm::ConstantInt
>(adj
)->getZExtValue();
925 adj
= llvm::ConstantInt::get(adj
->getType(), offset
);
928 llvm::Value
*srcAdj
= Builder
.CreateExtractValue(src
, 1, "src.adj");
931 dstAdj
= Builder
.CreateNSWSub(srcAdj
, adj
, "adj");
933 dstAdj
= Builder
.CreateNSWAdd(srcAdj
, adj
, "adj");
935 return Builder
.CreateInsertValue(src
, dstAdj
, 1);
939 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr
*E
,
940 llvm::Constant
*src
) {
941 assert(E
->getCastKind() == CK_DerivedToBaseMemberPointer
||
942 E
->getCastKind() == CK_BaseToDerivedMemberPointer
||
943 E
->getCastKind() == CK_ReinterpretMemberPointer
);
945 // Under Itanium, reinterprets don't require any additional processing.
946 if (E
->getCastKind() == CK_ReinterpretMemberPointer
) return src
;
948 // If the adjustment is trivial, we don't need to do anything.
949 llvm::Constant
*adj
= getMemberPointerAdjustment(E
);
950 if (!adj
) return src
;
952 bool isDerivedToBase
= (E
->getCastKind() == CK_DerivedToBaseMemberPointer
);
954 const MemberPointerType
*destTy
=
955 E
->getType()->castAs
<MemberPointerType
>();
957 // For member data pointers, this is just a matter of adding the
958 // offset if the source is non-null.
959 if (destTy
->isMemberDataPointer()) {
960 // null maps to null.
961 if (src
->isAllOnesValue()) return src
;
964 return llvm::ConstantExpr::getNSWSub(src
, adj
);
966 return llvm::ConstantExpr::getNSWAdd(src
, adj
);
969 // The this-adjustment is left-shifted by 1 on ARM.
970 if (UseARMMethodPtrABI
) {
971 uint64_t offset
= cast
<llvm::ConstantInt
>(adj
)->getZExtValue();
973 adj
= llvm::ConstantInt::get(adj
->getType(), offset
);
976 llvm::Constant
*srcAdj
= src
->getAggregateElement(1);
977 llvm::Constant
*dstAdj
;
979 dstAdj
= llvm::ConstantExpr::getNSWSub(srcAdj
, adj
);
981 dstAdj
= llvm::ConstantExpr::getNSWAdd(srcAdj
, adj
);
983 llvm::Constant
*res
= ConstantFoldInsertValueInstruction(src
, dstAdj
, 1);
984 assert(res
!= nullptr && "Folding must succeed");
989 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType
*MPT
) {
990 // Itanium C++ ABI 2.3:
991 // A NULL pointer is represented as -1.
992 if (MPT
->isMemberDataPointer())
993 return llvm::ConstantInt::get(CGM
.PtrDiffTy
, -1ULL, /*isSigned=*/true);
995 llvm::Constant
*Zero
= llvm::ConstantInt::get(CGM
.PtrDiffTy
, 0);
996 llvm::Constant
*Values
[2] = { Zero
, Zero
};
997 return llvm::ConstantStruct::getAnon(Values
);
1001 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType
*MPT
,
1003 // Itanium C++ ABI 2.3:
1004 // A pointer to data member is an offset from the base address of
1005 // the class object containing it, represented as a ptrdiff_t
1006 return llvm::ConstantInt::get(CGM
.PtrDiffTy
, offset
.getQuantity());
1010 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl
*MD
) {
1011 return BuildMemberPointer(MD
, CharUnits::Zero());
1014 llvm::Constant
*ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl
*MD
,
1015 CharUnits ThisAdjustment
) {
1016 assert(MD
->isInstance() && "Member function must not be static!");
1018 CodeGenTypes
&Types
= CGM
.getTypes();
1020 // Get the function pointer (or index if this is a virtual function).
1021 llvm::Constant
*MemPtr
[2];
1022 if (MD
->isVirtual()) {
1023 uint64_t Index
= CGM
.getItaniumVTableContext().getMethodVTableIndex(MD
);
1024 uint64_t VTableOffset
;
1025 if (CGM
.getItaniumVTableContext().isRelativeLayout()) {
1026 // Multiply by 4-byte relative offsets.
1027 VTableOffset
= Index
* 4;
1029 const ASTContext
&Context
= getContext();
1030 CharUnits PointerWidth
= Context
.toCharUnitsFromBits(
1031 Context
.getTargetInfo().getPointerWidth(LangAS::Default
));
1032 VTableOffset
= Index
* PointerWidth
.getQuantity();
1035 if (UseARMMethodPtrABI
) {
1036 // ARM C++ ABI 3.2.1:
1037 // This ABI specifies that adj contains twice the this
1038 // adjustment, plus 1 if the member function is virtual. The
1039 // least significant bit of adj then makes exactly the same
1040 // discrimination as the least significant bit of ptr does for
1042 MemPtr
[0] = llvm::ConstantInt::get(CGM
.PtrDiffTy
, VTableOffset
);
1043 MemPtr
[1] = llvm::ConstantInt::get(CGM
.PtrDiffTy
,
1044 2 * ThisAdjustment
.getQuantity() + 1);
1046 // Itanium C++ ABI 2.3:
1047 // For a virtual function, [the pointer field] is 1 plus the
1048 // virtual table offset (in bytes) of the function,
1049 // represented as a ptrdiff_t.
1050 MemPtr
[0] = llvm::ConstantInt::get(CGM
.PtrDiffTy
, VTableOffset
+ 1);
1051 MemPtr
[1] = llvm::ConstantInt::get(CGM
.PtrDiffTy
,
1052 ThisAdjustment
.getQuantity());
1055 const FunctionProtoType
*FPT
= MD
->getType()->castAs
<FunctionProtoType
>();
1057 // Check whether the function has a computable LLVM signature.
1058 if (Types
.isFuncTypeConvertible(FPT
)) {
1059 // The function has a computable LLVM signature; use the correct type.
1060 Ty
= Types
.GetFunctionType(Types
.arrangeCXXMethodDeclaration(MD
));
1062 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1063 // function type is incomplete.
1066 llvm::Constant
*addr
= CGM
.GetAddrOfFunction(MD
, Ty
);
1068 MemPtr
[0] = llvm::ConstantExpr::getPtrToInt(addr
, CGM
.PtrDiffTy
);
1069 MemPtr
[1] = llvm::ConstantInt::get(CGM
.PtrDiffTy
,
1070 (UseARMMethodPtrABI
? 2 : 1) *
1071 ThisAdjustment
.getQuantity());
1074 return llvm::ConstantStruct::getAnon(MemPtr
);
1077 llvm::Constant
*ItaniumCXXABI::EmitMemberPointer(const APValue
&MP
,
1079 const MemberPointerType
*MPT
= MPType
->castAs
<MemberPointerType
>();
1080 const ValueDecl
*MPD
= MP
.getMemberPointerDecl();
1082 return EmitNullMemberPointer(MPT
);
1084 CharUnits ThisAdjustment
= getContext().getMemberPointerPathAdjustment(MP
);
1086 if (const CXXMethodDecl
*MD
= dyn_cast
<CXXMethodDecl
>(MPD
))
1087 return BuildMemberPointer(MD
, ThisAdjustment
);
1089 CharUnits FieldOffset
=
1090 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD
));
1091 return EmitMemberDataPointer(MPT
, ThisAdjustment
+ FieldOffset
);
1094 /// The comparison algorithm is pretty easy: the member pointers are
1095 /// the same if they're either bitwise identical *or* both null.
1097 /// ARM is different here only because null-ness is more complicated.
1099 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction
&CGF
,
1102 const MemberPointerType
*MPT
,
1104 CGBuilderTy
&Builder
= CGF
.Builder
;
1106 llvm::ICmpInst::Predicate Eq
;
1107 llvm::Instruction::BinaryOps And
, Or
;
1109 Eq
= llvm::ICmpInst::ICMP_NE
;
1110 And
= llvm::Instruction::Or
;
1111 Or
= llvm::Instruction::And
;
1113 Eq
= llvm::ICmpInst::ICMP_EQ
;
1114 And
= llvm::Instruction::And
;
1115 Or
= llvm::Instruction::Or
;
1118 // Member data pointers are easy because there's a unique null
1119 // value, so it just comes down to bitwise equality.
1120 if (MPT
->isMemberDataPointer())
1121 return Builder
.CreateICmp(Eq
, L
, R
);
1123 // For member function pointers, the tautologies are more complex.
1124 // The Itanium tautology is:
1125 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1126 // The ARM tautology is:
1127 // (L == R) <==> (L.ptr == R.ptr &&
1128 // (L.adj == R.adj ||
1129 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1130 // The inequality tautologies have exactly the same structure, except
1131 // applying De Morgan's laws.
1133 llvm::Value
*LPtr
= Builder
.CreateExtractValue(L
, 0, "lhs.memptr.ptr");
1134 llvm::Value
*RPtr
= Builder
.CreateExtractValue(R
, 0, "rhs.memptr.ptr");
1136 // This condition tests whether L.ptr == R.ptr. This must always be
1137 // true for equality to hold.
1138 llvm::Value
*PtrEq
= Builder
.CreateICmp(Eq
, LPtr
, RPtr
, "cmp.ptr");
1140 // This condition, together with the assumption that L.ptr == R.ptr,
1141 // tests whether the pointers are both null. ARM imposes an extra
1143 llvm::Value
*Zero
= llvm::Constant::getNullValue(LPtr
->getType());
1144 llvm::Value
*EqZero
= Builder
.CreateICmp(Eq
, LPtr
, Zero
, "cmp.ptr.null");
1146 // This condition tests whether L.adj == R.adj. If this isn't
1147 // true, the pointers are unequal unless they're both null.
1148 llvm::Value
*LAdj
= Builder
.CreateExtractValue(L
, 1, "lhs.memptr.adj");
1149 llvm::Value
*RAdj
= Builder
.CreateExtractValue(R
, 1, "rhs.memptr.adj");
1150 llvm::Value
*AdjEq
= Builder
.CreateICmp(Eq
, LAdj
, RAdj
, "cmp.adj");
1152 // Null member function pointers on ARM clear the low bit of Adj,
1153 // so the zero condition has to check that neither low bit is set.
1154 if (UseARMMethodPtrABI
) {
1155 llvm::Value
*One
= llvm::ConstantInt::get(LPtr
->getType(), 1);
1157 // Compute (l.adj | r.adj) & 1 and test it against zero.
1158 llvm::Value
*OrAdj
= Builder
.CreateOr(LAdj
, RAdj
, "or.adj");
1159 llvm::Value
*OrAdjAnd1
= Builder
.CreateAnd(OrAdj
, One
);
1160 llvm::Value
*OrAdjAnd1EqZero
= Builder
.CreateICmp(Eq
, OrAdjAnd1
, Zero
,
1162 EqZero
= Builder
.CreateBinOp(And
, EqZero
, OrAdjAnd1EqZero
);
1165 // Tie together all our conditions.
1166 llvm::Value
*Result
= Builder
.CreateBinOp(Or
, EqZero
, AdjEq
);
1167 Result
= Builder
.CreateBinOp(And
, PtrEq
, Result
,
1168 Inequality
? "memptr.ne" : "memptr.eq");
1173 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction
&CGF
,
1174 llvm::Value
*MemPtr
,
1175 const MemberPointerType
*MPT
) {
1176 CGBuilderTy
&Builder
= CGF
.Builder
;
1178 /// For member data pointers, this is just a check against -1.
1179 if (MPT
->isMemberDataPointer()) {
1180 assert(MemPtr
->getType() == CGM
.PtrDiffTy
);
1181 llvm::Value
*NegativeOne
=
1182 llvm::Constant::getAllOnesValue(MemPtr
->getType());
1183 return Builder
.CreateICmpNE(MemPtr
, NegativeOne
, "memptr.tobool");
1186 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1187 llvm::Value
*Ptr
= Builder
.CreateExtractValue(MemPtr
, 0, "memptr.ptr");
1189 llvm::Constant
*Zero
= llvm::ConstantInt::get(Ptr
->getType(), 0);
1190 llvm::Value
*Result
= Builder
.CreateICmpNE(Ptr
, Zero
, "memptr.tobool");
1192 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1193 // (the virtual bit) is set.
1194 if (UseARMMethodPtrABI
) {
1195 llvm::Constant
*One
= llvm::ConstantInt::get(Ptr
->getType(), 1);
1196 llvm::Value
*Adj
= Builder
.CreateExtractValue(MemPtr
, 1, "memptr.adj");
1197 llvm::Value
*VirtualBit
= Builder
.CreateAnd(Adj
, One
, "memptr.virtualbit");
1198 llvm::Value
*IsVirtual
= Builder
.CreateICmpNE(VirtualBit
, Zero
,
1199 "memptr.isvirtual");
1200 Result
= Builder
.CreateOr(Result
, IsVirtual
);
1206 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo
&FI
) const {
1207 const CXXRecordDecl
*RD
= FI
.getReturnType()->getAsCXXRecordDecl();
1211 // If C++ prohibits us from making a copy, return by address.
1212 if (!RD
->canPassInRegisters()) {
1213 auto Align
= CGM
.getContext().getTypeAlignInChars(FI
.getReturnType());
1214 FI
.getReturnInfo() = ABIArgInfo::getIndirect(Align
, /*ByVal=*/false);
1220 /// The Itanium ABI requires non-zero initialization only for data
1221 /// member pointers, for which '0' is a valid offset.
1222 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType
*MPT
) {
1223 return MPT
->isMemberFunctionPointer();
1226 /// The Itanium ABI always places an offset to the complete object
1227 /// at entry -2 in the vtable.
1228 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction
&CGF
,
1229 const CXXDeleteExpr
*DE
,
1231 QualType ElementType
,
1232 const CXXDestructorDecl
*Dtor
) {
1233 bool UseGlobalDelete
= DE
->isGlobalDelete();
1234 if (UseGlobalDelete
) {
1235 // Derive the complete-object pointer, which is what we need
1236 // to pass to the deallocation function.
1238 // Grab the vtable pointer as an intptr_t*.
1240 cast
<CXXRecordDecl
>(ElementType
->castAs
<RecordType
>()->getDecl());
1241 llvm::Value
*VTable
= CGF
.GetVTablePtr(
1242 Ptr
, llvm::PointerType::getUnqual(CGF
.getLLVMContext()), ClassDecl
);
1244 // Track back to entry -2 and pull out the offset there.
1245 llvm::Value
*OffsetPtr
= CGF
.Builder
.CreateConstInBoundsGEP1_64(
1246 CGF
.IntPtrTy
, VTable
, -2, "complete-offset.ptr");
1247 llvm::Value
*Offset
= CGF
.Builder
.CreateAlignedLoad(CGF
.IntPtrTy
, OffsetPtr
,
1248 CGF
.getPointerAlign());
1250 // Apply the offset.
1251 llvm::Value
*CompletePtr
= Ptr
.getPointer();
1253 CGF
.Builder
.CreateInBoundsGEP(CGF
.Int8Ty
, CompletePtr
, Offset
);
1255 // If we're supposed to call the global delete, make sure we do so
1256 // even if the destructor throws.
1257 CGF
.pushCallObjectDeleteCleanup(DE
->getOperatorDelete(), CompletePtr
,
1261 // FIXME: Provide a source location here even though there's no
1262 // CXXMemberCallExpr for dtor call.
1263 CXXDtorType DtorType
= UseGlobalDelete
? Dtor_Complete
: Dtor_Deleting
;
1264 EmitVirtualDestructorCall(CGF
, Dtor
, DtorType
, Ptr
, DE
);
1266 if (UseGlobalDelete
)
1267 CGF
.PopCleanupBlock();
1270 void ItaniumCXXABI::emitRethrow(CodeGenFunction
&CGF
, bool isNoReturn
) {
1271 // void __cxa_rethrow();
1273 llvm::FunctionType
*FTy
=
1274 llvm::FunctionType::get(CGM
.VoidTy
, /*isVarArg=*/false);
1276 llvm::FunctionCallee Fn
= CGM
.CreateRuntimeFunction(FTy
, "__cxa_rethrow");
1279 CGF
.EmitNoreturnRuntimeCallOrInvoke(Fn
, std::nullopt
);
1281 CGF
.EmitRuntimeCallOrInvoke(Fn
);
1284 static llvm::FunctionCallee
getAllocateExceptionFn(CodeGenModule
&CGM
) {
1285 // void *__cxa_allocate_exception(size_t thrown_size);
1287 llvm::FunctionType
*FTy
=
1288 llvm::FunctionType::get(CGM
.Int8PtrTy
, CGM
.SizeTy
, /*isVarArg=*/false);
1290 return CGM
.CreateRuntimeFunction(FTy
, "__cxa_allocate_exception");
1293 static llvm::FunctionCallee
getThrowFn(CodeGenModule
&CGM
) {
1294 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1295 // void (*dest) (void *));
1297 llvm::Type
*Args
[3] = { CGM
.Int8PtrTy
, CGM
.GlobalsInt8PtrTy
, CGM
.Int8PtrTy
};
1298 llvm::FunctionType
*FTy
=
1299 llvm::FunctionType::get(CGM
.VoidTy
, Args
, /*isVarArg=*/false);
1301 return CGM
.CreateRuntimeFunction(FTy
, "__cxa_throw");
1304 void ItaniumCXXABI::emitThrow(CodeGenFunction
&CGF
, const CXXThrowExpr
*E
) {
1305 QualType ThrowType
= E
->getSubExpr()->getType();
1306 // Now allocate the exception object.
1307 llvm::Type
*SizeTy
= CGF
.ConvertType(getContext().getSizeType());
1308 uint64_t TypeSize
= getContext().getTypeSizeInChars(ThrowType
).getQuantity();
1310 llvm::FunctionCallee AllocExceptionFn
= getAllocateExceptionFn(CGM
);
1311 llvm::CallInst
*ExceptionPtr
= CGF
.EmitNounwindRuntimeCall(
1312 AllocExceptionFn
, llvm::ConstantInt::get(SizeTy
, TypeSize
), "exception");
1314 CharUnits ExnAlign
= CGF
.getContext().getExnObjectAlignment();
1315 CGF
.EmitAnyExprToExn(
1316 E
->getSubExpr(), Address(ExceptionPtr
, CGM
.Int8Ty
, ExnAlign
));
1318 // Now throw the exception.
1319 llvm::Constant
*TypeInfo
= CGM
.GetAddrOfRTTIDescriptor(ThrowType
,
1322 // The address of the destructor. If the exception type has a
1323 // trivial destructor (or isn't a record), we just pass null.
1324 llvm::Constant
*Dtor
= nullptr;
1325 if (const RecordType
*RecordTy
= ThrowType
->getAs
<RecordType
>()) {
1326 CXXRecordDecl
*Record
= cast
<CXXRecordDecl
>(RecordTy
->getDecl());
1327 if (!Record
->hasTrivialDestructor()) {
1328 CXXDestructorDecl
*DtorD
= Record
->getDestructor();
1329 Dtor
= CGM
.getAddrOfCXXStructor(GlobalDecl(DtorD
, Dtor_Complete
));
1330 Dtor
= llvm::ConstantExpr::getBitCast(Dtor
, CGM
.Int8PtrTy
);
1333 if (!Dtor
) Dtor
= llvm::Constant::getNullValue(CGM
.Int8PtrTy
);
1335 llvm::Value
*args
[] = { ExceptionPtr
, TypeInfo
, Dtor
};
1336 CGF
.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM
), args
);
1339 static llvm::FunctionCallee
getItaniumDynamicCastFn(CodeGenFunction
&CGF
) {
1340 // void *__dynamic_cast(const void *sub,
1341 // global_as const abi::__class_type_info *src,
1342 // global_as const abi::__class_type_info *dst,
1343 // std::ptrdiff_t src2dst_offset);
1345 llvm::Type
*Int8PtrTy
= CGF
.Int8PtrTy
;
1346 llvm::Type
*GlobInt8PtrTy
= CGF
.GlobalsInt8PtrTy
;
1347 llvm::Type
*PtrDiffTy
=
1348 CGF
.ConvertType(CGF
.getContext().getPointerDiffType());
1350 llvm::Type
*Args
[4] = { Int8PtrTy
, GlobInt8PtrTy
, GlobInt8PtrTy
, PtrDiffTy
};
1352 llvm::FunctionType
*FTy
= llvm::FunctionType::get(Int8PtrTy
, Args
, false);
1354 // Mark the function as nounwind readonly.
1355 llvm::AttrBuilder
FuncAttrs(CGF
.getLLVMContext());
1356 FuncAttrs
.addAttribute(llvm::Attribute::NoUnwind
);
1357 FuncAttrs
.addMemoryAttr(llvm::MemoryEffects::readOnly());
1358 llvm::AttributeList Attrs
= llvm::AttributeList::get(
1359 CGF
.getLLVMContext(), llvm::AttributeList::FunctionIndex
, FuncAttrs
);
1361 return CGF
.CGM
.CreateRuntimeFunction(FTy
, "__dynamic_cast", Attrs
);
1364 static llvm::FunctionCallee
getBadCastFn(CodeGenFunction
&CGF
) {
1365 // void __cxa_bad_cast();
1366 llvm::FunctionType
*FTy
= llvm::FunctionType::get(CGF
.VoidTy
, false);
1367 return CGF
.CGM
.CreateRuntimeFunction(FTy
, "__cxa_bad_cast");
1370 /// Compute the src2dst_offset hint as described in the
1371 /// Itanium C++ ABI [2.9.7]
1372 static CharUnits
computeOffsetHint(ASTContext
&Context
,
1373 const CXXRecordDecl
*Src
,
1374 const CXXRecordDecl
*Dst
) {
1375 CXXBasePaths
Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1376 /*DetectVirtual=*/false);
1378 // If Dst is not derived from Src we can skip the whole computation below and
1379 // return that Src is not a public base of Dst. Record all inheritance paths.
1380 if (!Dst
->isDerivedFrom(Src
, Paths
))
1381 return CharUnits::fromQuantity(-2ULL);
1383 unsigned NumPublicPaths
= 0;
1386 // Now walk all possible inheritance paths.
1387 for (const CXXBasePath
&Path
: Paths
) {
1388 if (Path
.Access
!= AS_public
) // Ignore non-public inheritance.
1393 for (const CXXBasePathElement
&PathElement
: Path
) {
1394 // If the path contains a virtual base class we can't give any hint.
1396 if (PathElement
.Base
->isVirtual())
1397 return CharUnits::fromQuantity(-1ULL);
1399 if (NumPublicPaths
> 1) // Won't use offsets, skip computation.
1402 // Accumulate the base class offsets.
1403 const ASTRecordLayout
&L
= Context
.getASTRecordLayout(PathElement
.Class
);
1404 Offset
+= L
.getBaseClassOffset(
1405 PathElement
.Base
->getType()->getAsCXXRecordDecl());
1409 // -2: Src is not a public base of Dst.
1410 if (NumPublicPaths
== 0)
1411 return CharUnits::fromQuantity(-2ULL);
1413 // -3: Src is a multiple public base type but never a virtual base type.
1414 if (NumPublicPaths
> 1)
1415 return CharUnits::fromQuantity(-3ULL);
1417 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1418 // Return the offset of Src from the origin of Dst.
1422 static llvm::FunctionCallee
getBadTypeidFn(CodeGenFunction
&CGF
) {
1423 // void __cxa_bad_typeid();
1424 llvm::FunctionType
*FTy
= llvm::FunctionType::get(CGF
.VoidTy
, false);
1426 return CGF
.CGM
.CreateRuntimeFunction(FTy
, "__cxa_bad_typeid");
1429 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref
,
1430 QualType SrcRecordTy
) {
1434 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction
&CGF
) {
1435 llvm::FunctionCallee Fn
= getBadTypeidFn(CGF
);
1436 llvm::CallBase
*Call
= CGF
.EmitRuntimeCallOrInvoke(Fn
);
1437 Call
->setDoesNotReturn();
1438 CGF
.Builder
.CreateUnreachable();
1441 llvm::Value
*ItaniumCXXABI::EmitTypeid(CodeGenFunction
&CGF
,
1442 QualType SrcRecordTy
,
1444 llvm::Type
*StdTypeInfoPtrTy
) {
1446 cast
<CXXRecordDecl
>(SrcRecordTy
->castAs
<RecordType
>()->getDecl());
1447 llvm::Value
*Value
= CGF
.GetVTablePtr(
1448 ThisPtr
, llvm::PointerType::getUnqual(CGF
.getLLVMContext()), ClassDecl
);
1450 if (CGM
.getItaniumVTableContext().isRelativeLayout()) {
1451 // Load the type info.
1452 Value
= CGF
.Builder
.CreateCall(
1453 CGM
.getIntrinsic(llvm::Intrinsic::load_relative
, {CGM
.Int32Ty
}),
1454 {Value
, llvm::ConstantInt::get(CGM
.Int32Ty
, -4)});
1456 // Load the type info.
1458 CGF
.Builder
.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy
, Value
, -1ULL);
1460 return CGF
.Builder
.CreateAlignedLoad(StdTypeInfoPtrTy
, Value
,
1461 CGF
.getPointerAlign());
1464 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr
,
1465 QualType SrcRecordTy
) {
1469 llvm::Value
*ItaniumCXXABI::emitDynamicCastCall(
1470 CodeGenFunction
&CGF
, Address ThisAddr
, QualType SrcRecordTy
,
1471 QualType DestTy
, QualType DestRecordTy
, llvm::BasicBlock
*CastEnd
) {
1472 llvm::Type
*PtrDiffLTy
=
1473 CGF
.ConvertType(CGF
.getContext().getPointerDiffType());
1475 llvm::Value
*SrcRTTI
=
1476 CGF
.CGM
.GetAddrOfRTTIDescriptor(SrcRecordTy
.getUnqualifiedType());
1477 llvm::Value
*DestRTTI
=
1478 CGF
.CGM
.GetAddrOfRTTIDescriptor(DestRecordTy
.getUnqualifiedType());
1480 // Compute the offset hint.
1481 const CXXRecordDecl
*SrcDecl
= SrcRecordTy
->getAsCXXRecordDecl();
1482 const CXXRecordDecl
*DestDecl
= DestRecordTy
->getAsCXXRecordDecl();
1483 llvm::Value
*OffsetHint
= llvm::ConstantInt::get(
1485 computeOffsetHint(CGF
.getContext(), SrcDecl
, DestDecl
).getQuantity());
1487 // Emit the call to __dynamic_cast.
1488 llvm::Value
*Args
[] = {ThisAddr
.getPointer(), SrcRTTI
, DestRTTI
, OffsetHint
};
1489 llvm::Value
*Value
=
1490 CGF
.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF
), Args
);
1492 /// C++ [expr.dynamic.cast]p9:
1493 /// A failed cast to reference type throws std::bad_cast
1494 if (DestTy
->isReferenceType()) {
1495 llvm::BasicBlock
*BadCastBlock
=
1496 CGF
.createBasicBlock("dynamic_cast.bad_cast");
1498 llvm::Value
*IsNull
= CGF
.Builder
.CreateIsNull(Value
);
1499 CGF
.Builder
.CreateCondBr(IsNull
, BadCastBlock
, CastEnd
);
1501 CGF
.EmitBlock(BadCastBlock
);
1502 EmitBadCastCall(CGF
);
1508 llvm::Value
*ItaniumCXXABI::emitExactDynamicCast(
1509 CodeGenFunction
&CGF
, Address ThisAddr
, QualType SrcRecordTy
,
1510 QualType DestTy
, QualType DestRecordTy
, llvm::BasicBlock
*CastSuccess
,
1511 llvm::BasicBlock
*CastFail
) {
1512 ASTContext
&Context
= getContext();
1514 // Find all the inheritance paths.
1515 const CXXRecordDecl
*SrcDecl
= SrcRecordTy
->getAsCXXRecordDecl();
1516 const CXXRecordDecl
*DestDecl
= DestRecordTy
->getAsCXXRecordDecl();
1517 CXXBasePaths
Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1518 /*DetectVirtual=*/false);
1519 (void)DestDecl
->isDerivedFrom(SrcDecl
, Paths
);
1521 // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
1523 std::optional
<CharUnits
> Offset
;
1524 for (const CXXBasePath
&Path
: Paths
) {
1525 // dynamic_cast only finds public inheritance paths.
1526 if (Path
.Access
!= AS_public
)
1529 CharUnits PathOffset
;
1530 for (const CXXBasePathElement
&PathElement
: Path
) {
1531 // Find the offset along this inheritance step.
1532 const CXXRecordDecl
*Base
=
1533 PathElement
.Base
->getType()->getAsCXXRecordDecl();
1534 if (PathElement
.Base
->isVirtual()) {
1535 // For a virtual base class, we know that the derived class is exactly
1536 // DestDecl, so we can use the vbase offset from its layout.
1537 const ASTRecordLayout
&L
= Context
.getASTRecordLayout(DestDecl
);
1538 PathOffset
= L
.getVBaseClassOffset(Base
);
1540 const ASTRecordLayout
&L
=
1541 Context
.getASTRecordLayout(PathElement
.Class
);
1542 PathOffset
+= L
.getBaseClassOffset(Base
);
1547 Offset
= PathOffset
;
1548 else if (Offset
!= PathOffset
) {
1549 // Base appears in at least two different places. Find the most-derived
1550 // object and see if it's a DestDecl. Note that the most-derived object
1551 // must be at least as aligned as this base class subobject, and must
1552 // have a vptr at offset 0.
1553 ThisAddr
= Address(emitDynamicCastToVoid(CGF
, ThisAddr
, SrcRecordTy
),
1554 CGF
.VoidPtrTy
, ThisAddr
.getAlignment());
1556 Offset
= CharUnits::Zero();
1562 // If there are no public inheritance paths, the cast always fails.
1563 CGF
.EmitBranch(CastFail
);
1564 return llvm::PoisonValue::get(CGF
.VoidPtrTy
);
1567 // Compare the vptr against the expected vptr for the destination type at
1568 // this offset. Note that we do not know what type ThisAddr points to in
1569 // the case where the derived class multiply inherits from the base class
1570 // so we can't use GetVTablePtr, so we load the vptr directly instead.
1571 llvm::Instruction
*VPtr
= CGF
.Builder
.CreateLoad(
1572 ThisAddr
.withElementType(CGF
.VoidPtrPtrTy
), "vtable");
1573 CGM
.DecorateInstructionWithTBAA(
1574 VPtr
, CGM
.getTBAAVTablePtrAccessInfo(CGF
.VoidPtrPtrTy
));
1575 llvm::Value
*Success
= CGF
.Builder
.CreateICmpEQ(
1576 VPtr
, getVTableAddressPoint(BaseSubobject(SrcDecl
, *Offset
), DestDecl
));
1577 llvm::Value
*Result
= ThisAddr
.getPointer();
1578 if (!Offset
->isZero())
1579 Result
= CGF
.Builder
.CreateInBoundsGEP(
1581 {llvm::ConstantInt::get(CGF
.PtrDiffTy
, -Offset
->getQuantity())});
1582 CGF
.Builder
.CreateCondBr(Success
, CastSuccess
, CastFail
);
1586 llvm::Value
*ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction
&CGF
,
1588 QualType SrcRecordTy
) {
1590 cast
<CXXRecordDecl
>(SrcRecordTy
->castAs
<RecordType
>()->getDecl());
1591 llvm::Value
*OffsetToTop
;
1592 if (CGM
.getItaniumVTableContext().isRelativeLayout()) {
1593 // Get the vtable pointer.
1594 llvm::Value
*VTable
= CGF
.GetVTablePtr(
1595 ThisAddr
, llvm::PointerType::getUnqual(CGF
.getLLVMContext()),
1598 // Get the offset-to-top from the vtable.
1600 CGF
.Builder
.CreateConstInBoundsGEP1_32(CGM
.Int32Ty
, VTable
, -2U);
1601 OffsetToTop
= CGF
.Builder
.CreateAlignedLoad(
1602 CGM
.Int32Ty
, OffsetToTop
, CharUnits::fromQuantity(4), "offset.to.top");
1604 llvm::Type
*PtrDiffLTy
=
1605 CGF
.ConvertType(CGF
.getContext().getPointerDiffType());
1607 // Get the vtable pointer.
1608 llvm::Value
*VTable
= CGF
.GetVTablePtr(
1609 ThisAddr
, llvm::PointerType::getUnqual(CGF
.getLLVMContext()),
1612 // Get the offset-to-top from the vtable.
1614 CGF
.Builder
.CreateConstInBoundsGEP1_64(PtrDiffLTy
, VTable
, -2ULL);
1615 OffsetToTop
= CGF
.Builder
.CreateAlignedLoad(
1616 PtrDiffLTy
, OffsetToTop
, CGF
.getPointerAlign(), "offset.to.top");
1618 // Finally, add the offset to the pointer.
1619 return CGF
.Builder
.CreateInBoundsGEP(CGF
.Int8Ty
, ThisAddr
.getPointer(),
1623 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction
&CGF
) {
1624 llvm::FunctionCallee Fn
= getBadCastFn(CGF
);
1625 llvm::CallBase
*Call
= CGF
.EmitRuntimeCallOrInvoke(Fn
);
1626 Call
->setDoesNotReturn();
1627 CGF
.Builder
.CreateUnreachable();
1632 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction
&CGF
,
1634 const CXXRecordDecl
*ClassDecl
,
1635 const CXXRecordDecl
*BaseClassDecl
) {
1636 llvm::Value
*VTablePtr
= CGF
.GetVTablePtr(This
, CGM
.Int8PtrTy
, ClassDecl
);
1637 CharUnits VBaseOffsetOffset
=
1638 CGM
.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl
,
1640 llvm::Value
*VBaseOffsetPtr
=
1641 CGF
.Builder
.CreateConstGEP1_64(
1642 CGF
.Int8Ty
, VTablePtr
, VBaseOffsetOffset
.getQuantity(),
1643 "vbase.offset.ptr");
1645 llvm::Value
*VBaseOffset
;
1646 if (CGM
.getItaniumVTableContext().isRelativeLayout()) {
1647 VBaseOffset
= CGF
.Builder
.CreateAlignedLoad(
1648 CGF
.Int32Ty
, VBaseOffsetPtr
, CharUnits::fromQuantity(4),
1651 VBaseOffset
= CGF
.Builder
.CreateAlignedLoad(
1652 CGM
.PtrDiffTy
, VBaseOffsetPtr
, CGF
.getPointerAlign(), "vbase.offset");
1657 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl
*D
) {
1658 // Just make sure we're in sync with TargetCXXABI.
1659 assert(CGM
.getTarget().getCXXABI().hasConstructorVariants());
1661 // The constructor used for constructing this as a base class;
1662 // ignores virtual bases.
1663 CGM
.EmitGlobal(GlobalDecl(D
, Ctor_Base
));
1665 // The constructor used for constructing this as a complete class;
1666 // constructs the virtual bases, then calls the base constructor.
1667 if (!D
->getParent()->isAbstract()) {
1668 // We don't need to emit the complete ctor if the class is abstract.
1669 CGM
.EmitGlobal(GlobalDecl(D
, Ctor_Complete
));
1673 CGCXXABI::AddedStructorArgCounts
1674 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD
,
1675 SmallVectorImpl
<CanQualType
> &ArgTys
) {
1676 ASTContext
&Context
= getContext();
1678 // All parameters are already in place except VTT, which goes after 'this'.
1679 // These are Clang types, so we don't need to worry about sret yet.
1681 // Check if we need to add a VTT parameter (which has type global void **).
1682 if ((isa
<CXXConstructorDecl
>(GD
.getDecl()) ? GD
.getCtorType() == Ctor_Base
1683 : GD
.getDtorType() == Dtor_Base
) &&
1684 cast
<CXXMethodDecl
>(GD
.getDecl())->getParent()->getNumVBases() != 0) {
1685 LangAS AS
= CGM
.GetGlobalVarAddressSpace(nullptr);
1686 QualType Q
= Context
.getAddrSpaceQualType(Context
.VoidPtrTy
, AS
);
1687 ArgTys
.insert(ArgTys
.begin() + 1,
1688 Context
.getPointerType(CanQualType::CreateUnsafe(Q
)));
1689 return AddedStructorArgCounts::prefix(1);
1691 return AddedStructorArgCounts
{};
1694 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl
*D
) {
1695 // The destructor used for destructing this as a base class; ignores
1697 CGM
.EmitGlobal(GlobalDecl(D
, Dtor_Base
));
1699 // The destructor used for destructing this as a most-derived class;
1700 // call the base destructor and then destructs any virtual bases.
1701 CGM
.EmitGlobal(GlobalDecl(D
, Dtor_Complete
));
1703 // The destructor in a virtual table is always a 'deleting'
1704 // destructor, which calls the complete destructor and then uses the
1705 // appropriate operator delete.
1707 CGM
.EmitGlobal(GlobalDecl(D
, Dtor_Deleting
));
1710 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction
&CGF
,
1712 FunctionArgList
&Params
) {
1713 const CXXMethodDecl
*MD
= cast
<CXXMethodDecl
>(CGF
.CurGD
.getDecl());
1714 assert(isa
<CXXConstructorDecl
>(MD
) || isa
<CXXDestructorDecl
>(MD
));
1716 // Check if we need a VTT parameter as well.
1717 if (NeedsVTTParameter(CGF
.CurGD
)) {
1718 ASTContext
&Context
= getContext();
1720 // FIXME: avoid the fake decl
1721 LangAS AS
= CGM
.GetGlobalVarAddressSpace(nullptr);
1722 QualType Q
= Context
.getAddrSpaceQualType(Context
.VoidPtrTy
, AS
);
1723 QualType T
= Context
.getPointerType(Q
);
1724 auto *VTTDecl
= ImplicitParamDecl::Create(
1725 Context
, /*DC=*/nullptr, MD
->getLocation(), &Context
.Idents
.get("vtt"),
1726 T
, ImplicitParamDecl::CXXVTT
);
1727 Params
.insert(Params
.begin() + 1, VTTDecl
);
1728 getStructorImplicitParamDecl(CGF
) = VTTDecl
;
1732 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction
&CGF
) {
1733 // Naked functions have no prolog.
1734 if (CGF
.CurFuncDecl
&& CGF
.CurFuncDecl
->hasAttr
<NakedAttr
>())
1737 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1738 /// adjustments are required, because they are all handled by thunks.
1739 setCXXABIThisValue(CGF
, loadIncomingCXXThis(CGF
));
1741 /// Initialize the 'vtt' slot if needed.
1742 if (getStructorImplicitParamDecl(CGF
)) {
1743 getStructorImplicitParamValue(CGF
) = CGF
.Builder
.CreateLoad(
1744 CGF
.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF
)), "vtt");
1747 /// If this is a function that the ABI specifies returns 'this', initialize
1748 /// the return slot to 'this' at the start of the function.
1750 /// Unlike the setting of return types, this is done within the ABI
1751 /// implementation instead of by clients of CGCXXABI because:
1752 /// 1) getThisValue is currently protected
1753 /// 2) in theory, an ABI could implement 'this' returns some other way;
1754 /// HasThisReturn only specifies a contract, not the implementation
1755 if (HasThisReturn(CGF
.CurGD
))
1756 CGF
.Builder
.CreateStore(getThisValue(CGF
), CGF
.ReturnValue
);
/// Compute the implicit arguments a constructor call needs under the Itanium
/// ABI: the VTT pointer, prefixed before the explicit arguments, when the
/// target constructor requires one.
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating) {
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument. Make sure to
  // correctly reflect its address space, which can differ from generic on
  // some targets.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
  QualType VTTTy = getContext().getPointerType(Q);
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
}
1776 llvm::Value
*ItaniumCXXABI::getCXXDestructorImplicitParam(
1777 CodeGenFunction
&CGF
, const CXXDestructorDecl
*DD
, CXXDtorType Type
,
1778 bool ForVirtualBase
, bool Delegating
) {
1779 GlobalDecl
GD(DD
, Type
);
1780 return CGF
.GetVTTParameter(GD
, ForVirtualBase
, Delegating
);
/// Emit a (possibly virtual) destructor call. Selects either the Apple-kext
/// virtual dispatch path or a direct call to the mangled structor, passing
/// the VTT as the implicit parameter when required.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  // kext mode never devirtualizes, so non-base destructors of virtual
  // classes must go through the kext vtable lookup.
  if (getContext().getLangOpts().AppleKext && Type != Dtor_Base &&
      DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
                            nullptr);
}
/// Emit the vtable (group) definition for \p RD: build the initializer,
/// fix up linkage/comdat/visibility, emit fundamental RTTI for the magic
/// __cxxabiv1::__fundamental_type_info class, attach type metadata, and
/// apply relative-layout post-processing.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  // Already emitted (e.g. via a deferred emission) — nothing to do.
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  if (VTContext.isRelativeLayout()) {
    CGVT.RemoveHwasanMetadata(VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
  }
}
1867 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1868 CodeGenFunction
&CGF
, CodeGenFunction::VPtr Vptr
) {
1869 if (Vptr
.NearestVBase
== nullptr)
1871 return NeedsVTTParameter(CGF
.CurGD
);
/// Return the vtable address point to store into a vptr while inside a
/// structor. Uses the VTT when the class has virtual bases (or the subobject
/// is/contains one) and this structor receives a VTT parameter; otherwise the
/// address point is a constant into the vtable itself.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {

  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
                                                  NearestVBase);
  }
  return getVTableAddressPoint(Base, VTableClass);
}
/// Compute the constant address point for \p Base inside the vtable group of
/// \p VTableClass, as an inbounds GEP into the vtable global.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // GEP operands: {0, vtable-in-group index, component index}.
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}
// Check whether all the non-inline virtual methods for the class have the
// specified attribute.
template <typename T>
static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
  bool FoundNonInlineVirtualMethodWithAttr = false;
  for (const auto *D : RD->noload_decls()) {
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Only out-of-line, virtual-as-written methods participate in the check.
      if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
          FD->doesThisDeclarationHaveABody())
        continue;
      // One missing attribute disqualifies the whole class.
      if (!D->hasAttr<T>())
        return false;
      FoundNonInlineVirtualMethodWithAttr = true;
    }
  }

  // We didn't find any non-inline virtual methods missing the attribute. We
  // will return true when we found at least one non-inline virtual with the
  // attribute. (This lets our caller know that the attribute needs to be
  // propagated up to the vtable.)
  return FoundNonInlineVirtualMethodWithAttr;
}
/// Load a vtable address point out of the VTT: index to the secondary vpointer
/// slot for (VTableClass, Base), then load the pointer stored there.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
                                                 VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
                                       CGF.getPointerAlign());
}
1952 llvm::Constant
*ItaniumCXXABI::getVTableAddressPointForConstExpr(
1953 BaseSubobject Base
, const CXXRecordDecl
*VTableClass
) {
1954 return getVTableAddressPoint(Base
, VTableClass
);
/// Get (creating and caching on first use) the global variable for the vtable
/// group of \p RD. The Itanium ABI keys vtables purely by class, so the
/// vptr offset must be zero.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  // Cache hit: the reference into the map lets us fill it in below.
  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer to global alignment for the vtable. Otherwise we would align
  // them based on the size of the initializer which doesn't make sense as only
  // single values are read.
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(AS);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getAsAlign());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // In MS C++ if you have a class with virtual functions in which you are using
  // selective member import/export, then all virtual functions must be exported
  // unless they are inline, otherwise a link error will result. To match this
  // behavior, for such classes, we dllimport the vtable if it is defined
  // externally and all the non-inline virtual methods are marked dllimport, and
  // we dllexport the vtable if it is defined in this TU and all the non-inline
  // virtual methods are marked dllexport.
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
      if (CGM.getVTables().isVTableExternal(RD)) {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
      } else {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
      }
    }
  }
  CGM.setGVProperties(VTable, RD);

  return VTable;
}
/// Load the virtual function pointer for \p GD out of the object's vtable.
/// Handles three forms: a CFI type-checked load, a relative-layout
/// llvm.load.relative, or a plain indexed load (optionally tagged
/// !invariant.load under -fstrict-vtable-pointers).
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD, Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: load through the type-checked intrinsic; the offset is in bytes.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, PtrTy,
        VTableIndex *
            CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
            8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative vtables store 32-bit offsets; each slot is 4 bytes.
      VFuncLoad = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
    } else {
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          PtrTy, VTable, VTableIndex, "vfn");
      VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
                                                CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
/// Emit a virtual destructor call originating either from a member call
/// expression or a delete expression (exactly one of the two is set in \p E).
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  // Exactly one source expression, and an explicit dtor call has no args.
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  QualType ThisTy;
  if (CE)
    ThisTy = CE->getObjectType();
  else
    ThisTy = D->getDestroyedType();

  // Virtual destructors never take a VTT; pass null implicit parameters.
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
                            QualType(), nullptr);
  return nullptr;
}
2093 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl
*RD
) {
2094 CodeGenVTables
&VTables
= CGM
.getVTables();
2095 llvm::GlobalVariable
*VTT
= VTables
.GetAddrOfVTT(RD
);
2096 VTables
.EmitVTTDefinition(VTT
, CGM
.getVTableLinkage(RD
), RD
);
/// Whether it is safe to emit an available_externally copy of this class's
/// vtable when it is used as a base subobject.
bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
    const CXXRecordDecl *RD) const {
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)
    return false;

  // If the vtable is hidden then it is not safe to emit an available_externally
  // copy of vtable.
  if (isVTableHidden(RD))
    return false;

  if (CGM.getCodeGenOpts().ForceEmitVTables)
    return true;

  // If we don't have any not emitted inline virtual function then we are safe
  // to emit an available_externally copy of vtable.
  // FIXME we can still emit a copy of the vtable if we
  // can emit definition of the inline functions.
  if (hasAnyUnusedVirtualInlineFunction(RD))
    return false;

  // For a class with virtual bases, we must also be able to speculatively
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
  // the vtable" and "can emit the VTT". For a base subobject, this means we
  // need to be able to emit non-virtual base vtables.
  if (RD->getNumVBases()) {
    for (const auto &B : RD->bases()) {
      auto *BRD = B.getType()->getAsCXXRecordDecl();
      assert(BRD && "no class for base specifier");
      if (B.isVirtual() || !BRD->isDynamicClass())
        continue;
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
        return false;
    }
  }

  return true;
}
/// Whether it is safe to emit an available_externally complete-object vtable
/// (and its VTT) for this class.
bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
    return false;

  // For a complete-object vtable (or more specifically, for the VTT), we need
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
  for (const auto &B : RD->vbases()) {
    auto *BRD = B.getType()->getAsCXXRecordDecl();
    assert(BRD && "no class for base specifier");
    if (!BRD->isDynamicClass())
      continue;
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
      return false;
  }

  return true;
}
/// Apply a this/return pointer adjustment consisting of a non-virtual byte
/// offset and/or a virtual offset read from the vtable. For a 'this'
/// adjustment the non-virtual part is applied first; for a return adjustment
/// it is applied last.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  Address V = InitialPtr.withElementType(CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(
        V, CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    Address VTablePtrPtr = V.withElementType(CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      Offset = CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
                                             CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getElementType(),
                                              V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  return ResultPtr;
}
/// Adjust 'this' on entry to a thunk; the virtual part indexes a vcall
/// offset in the vtable.
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}
/// Adjust a covariant return value in a thunk; the virtual part indexes a
/// vbase offset in the vtable.
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}
/// ARM thunk return: destructor thunks return an indeterminate value, so
/// substitute undef; everything else uses the Itanium behavior.
void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T = CGF.ReturnValue.getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}
2238 /************************** Array allocation cookies **************************/
/// Size of the array-new cookie: a size_t, padded up to the element's
/// preferred alignment.
CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getPreferredTypeAlignInChars(elementType));
}
/// Write the array-new cookie (the element count, right-justified in the
/// cookie space) and return the adjusted pointer to the element storage.
/// Under ASan, also poisons the cookie via the runtime.
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    SI->setNoSanitizeMetadata();
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
/// Read the element count back out of an array-new cookie. Under ASan the
/// load goes through a runtime call so a poisoned cookie reads as 0.
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr = CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr,
                                                            numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGF.SizeTy, llvm::PointerType::getUnqual(CGF.getLLVMContext()), false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}
/// ARM array cookie size: two size_t fields {element_size, element_count},
/// rounded up to the element alignment.
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}
/// Write the ARM-style array cookie ({element_size, element_count} at the
/// start of the allocation) and return the adjusted element pointer.
Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = cookie.withElementType(CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(
      CGF.SizeTy, getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
/// Read the element count from an ARM array cookie; it sits one size_t past
/// the allocation start (after the element_size field).
llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            Address allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
  Address numElementsPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());

  numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
  return CGF.Builder.CreateLoad(numElementsPtr);
}
2369 /*********************** Static local initialization **************************/
/// Get a declaration of the thread-safe static-init acquire routine.
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                              GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_acquire",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}
2384 static llvm::FunctionCallee
getGuardReleaseFn(CodeGenModule
&CGM
,
2385 llvm::PointerType
*GuardPtrTy
) {
2386 // void __cxa_guard_release(__guard *guard_object);
2387 llvm::FunctionType
*FTy
=
2388 llvm::FunctionType::get(CGM
.VoidTy
, GuardPtrTy
, /*isVarArg=*/false);
2389 return CGM
.CreateRuntimeFunction(
2390 FTy
, "__cxa_guard_release",
2391 llvm::AttributeList::get(CGM
.getLLVMContext(),
2392 llvm::AttributeList::FunctionIndex
,
2393 llvm::Attribute::NoUnwind
));
2396 static llvm::FunctionCallee
getGuardAbortFn(CodeGenModule
&CGM
,
2397 llvm::PointerType
*GuardPtrTy
) {
2398 // void __cxa_guard_abort(__guard *guard_object);
2399 llvm::FunctionType
*FTy
=
2400 llvm::FunctionType::get(CGM
.VoidTy
, GuardPtrTy
, /*isVarArg=*/false);
2401 return CGM
.CreateRuntimeFunction(
2402 FTy
, "__cxa_guard_abort",
2403 llvm::AttributeList::get(CGM
.getLLVMContext(),
2404 llvm::AttributeList::FunctionIndex
,
2405 llvm::Attribute::NoUnwind
));
/// Cleanup that calls __cxa_guard_abort on the exceptional path out of a
/// guarded static initialization, releasing the guard so a later attempt
/// can retry the initialization.
struct CallGuardAbort final : EHScopeStack::Cleanup {
  llvm::GlobalVariable *Guard;
  CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // __cxa_guard_abort cannot throw; emit as a nounwind runtime call.
    CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                Guard);
  }
};
2420 /// The ARM code here follows the Itanium code closely enough that we
2421 /// just special-case it at particular places.
2422 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction
&CGF
,
2424 llvm::GlobalVariable
*var
,
2425 bool shouldPerformInit
) {
2426 CGBuilderTy
&Builder
= CGF
.Builder
;
2428 // Inline variables that weren't instantiated from variable templates have
2429 // partially-ordered initialization within their translation unit.
2430 bool NonTemplateInline
=
2432 !isTemplateInstantiation(D
.getTemplateSpecializationKind());
2434 // We only need to use thread-safe statics for local non-TLS variables and
2435 // inline variables; other global initialization is always single-threaded
2436 // or (through lazy dynamic loading in multiple threads) unsequenced.
2437 bool threadsafe
= getContext().getLangOpts().ThreadsafeStatics
&&
2438 (D
.isLocalVarDecl() || NonTemplateInline
) &&
2441 // If we have a global variable with internal linkage and thread-safe statics
2442 // are disabled, we can just let the guard variable be of type i8.
2443 bool useInt8GuardVariable
= !threadsafe
&& var
->hasInternalLinkage();
2445 llvm::IntegerType
*guardTy
;
2446 CharUnits guardAlignment
;
2447 if (useInt8GuardVariable
) {
2448 guardTy
= CGF
.Int8Ty
;
2449 guardAlignment
= CharUnits::One();
2451 // Guard variables are 64 bits in the generic ABI and size width on ARM
2452 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2453 if (UseARMGuardVarABI
) {
2454 guardTy
= CGF
.SizeTy
;
2455 guardAlignment
= CGF
.getSizeAlign();
2457 guardTy
= CGF
.Int64Ty
;
2459 CharUnits::fromQuantity(CGM
.getDataLayout().getABITypeAlign(guardTy
));
2462 llvm::PointerType
*guardPtrTy
= llvm::PointerType::get(
2463 CGF
.CGM
.getLLVMContext(),
2464 CGF
.CGM
.getDataLayout().getDefaultGlobalsAddressSpace());
2466 // Create the guard variable if we don't already have it (as we
2467 // might if we're double-emitting this function body).
2468 llvm::GlobalVariable
*guard
= CGM
.getStaticLocalDeclGuardAddress(&D
);
2470 // Mangle the name for the guard.
2471 SmallString
<256> guardName
;
2473 llvm::raw_svector_ostream
out(guardName
);
2474 getMangleContext().mangleStaticGuardVariable(&D
, out
);
2477 // Create the guard variable with a zero-initializer.
2478 // Just absorb linkage, visibility and dll storage class from the guarded
2480 guard
= new llvm::GlobalVariable(CGM
.getModule(), guardTy
,
2481 false, var
->getLinkage(),
2482 llvm::ConstantInt::get(guardTy
, 0),
2484 guard
->setDSOLocal(var
->isDSOLocal());
2485 guard
->setVisibility(var
->getVisibility());
2486 guard
->setDLLStorageClass(var
->getDLLStorageClass());
2487 // If the variable is thread-local, so is its guard variable.
2488 guard
->setThreadLocalMode(var
->getThreadLocalMode());
2489 guard
->setAlignment(guardAlignment
.getAsAlign());
2491 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2492 // group as the associated data object." In practice, this doesn't work for
2493 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2494 llvm::Comdat
*C
= var
->getComdat();
2495 if (!D
.isLocalVarDecl() && C
&&
2496 (CGM
.getTarget().getTriple().isOSBinFormatELF() ||
2497 CGM
.getTarget().getTriple().isOSBinFormatWasm())) {
2498 guard
->setComdat(C
);
2499 } else if (CGM
.supportsCOMDAT() && guard
->isWeakForLinker()) {
2500 guard
->setComdat(CGM
.getModule().getOrInsertComdat(guard
->getName()));
2503 CGM
.setStaticLocalDeclGuardAddress(&D
, guard
);
2506 Address guardAddr
= Address(guard
, guard
->getValueType(), guardAlignment
);
2508 // Test whether the variable has completed initialization.
2510 // Itanium C++ ABI 3.3.2:
2511 // The following is pseudo-code showing how these functions can be used:
2512 // if (obj_guard.first_byte == 0) {
2513 // if ( __cxa_guard_acquire (&obj_guard) ) {
2515 // ... initialize the object ...;
2517 // __cxa_guard_abort (&obj_guard);
2520 // ... queue object destructor with __cxa_atexit() ...;
2521 // __cxa_guard_release (&obj_guard);
2525 // If threadsafe statics are enabled, but we don't have inline atomics, just
2526 // call __cxa_guard_acquire unconditionally. The "inline" check isn't
2527 // actually inline, and the user might not expect calls to __atomic libcalls.
2529 unsigned MaxInlineWidthInBits
= CGF
.getTarget().getMaxAtomicInlineWidth();
2530 llvm::BasicBlock
*EndBlock
= CGF
.createBasicBlock("init.end");
2531 if (!threadsafe
|| MaxInlineWidthInBits
) {
2532 // Load the first byte of the guard variable.
2533 llvm::LoadInst
*LI
=
2534 Builder
.CreateLoad(guardAddr
.withElementType(CGM
.Int8Ty
));
2537 // An implementation supporting thread-safety on multiprocessor
2538 // systems must also guarantee that references to the initialized
2539 // object do not occur before the load of the initialization flag.
2541 // In LLVM, we do this by marking the load Acquire.
2543 LI
->setAtomic(llvm::AtomicOrdering::Acquire
);
2545 // For ARM, we should only check the first bit, rather than the entire byte:
2547 // ARM C++ ABI 3.2.3.1:
2548 // To support the potential use of initialization guard variables
2549 // as semaphores that are the target of ARM SWP and LDREX/STREX
2550 // synchronizing instructions we define a static initialization
2551 // guard variable to be a 4-byte aligned, 4-byte word with the
2552 // following inline access protocol.
2553 // #define INITIALIZED 1
2554 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2555 // if (__cxa_guard_acquire(&obj_guard))
2559 // and similarly for ARM64:
2561 // ARM64 C++ ABI 3.2.2:
2562 // This ABI instead only specifies the value bit 0 of the static guard
2563 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2564 // variable is not initialized and 1 when it is.
2566 (UseARMGuardVarABI
&& !useInt8GuardVariable
)
2567 ? Builder
.CreateAnd(LI
, llvm::ConstantInt::get(CGM
.Int8Ty
, 1))
2569 llvm::Value
*NeedsInit
= Builder
.CreateIsNull(V
, "guard.uninitialized");
2571 llvm::BasicBlock
*InitCheckBlock
= CGF
.createBasicBlock("init.check");
2573 // Check if the first byte of the guard variable is zero.
2574 CGF
.EmitCXXGuardedInitBranch(NeedsInit
, InitCheckBlock
, EndBlock
,
2575 CodeGenFunction::GuardKind::VariableGuard
, &D
);
2577 CGF
.EmitBlock(InitCheckBlock
);
2580 // The semantics of dynamic initialization of variables with static or thread
2581 // storage duration depends on whether they are declared at block-scope. The
2582 // initialization of such variables at block-scope can be aborted with an
2583 // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
2584 // to their initialization has undefined behavior (also per C++20
2585 // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
2586 // lead to termination (per C++20 [except.terminate]p1), and recursive
2587 // references to the variables are governed only by the lifetime rules (per
2588 // C++20 [class.cdtor]p2), which means such references are perfectly fine as
2589 // long as they avoid touching memory. As a result, block-scope variables must
2590 // not be marked as initialized until after initialization completes (unless
2591 // the mark is reverted following an exception), but non-block-scope variables
2592 // must be marked prior to initialization so that recursive accesses during
2593 // initialization do not restart initialization.
2595 // Variables used when coping with thread-safe statics and exceptions.
2597 // Call __cxa_guard_acquire.
2599 = CGF
.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM
, guardPtrTy
), guard
);
2601 llvm::BasicBlock
*InitBlock
= CGF
.createBasicBlock("init");
2603 Builder
.CreateCondBr(Builder
.CreateIsNotNull(V
, "tobool"),
2604 InitBlock
, EndBlock
);
2606 // Call __cxa_guard_abort along the exceptional edge.
2607 CGF
.EHStack
.pushCleanup
<CallGuardAbort
>(EHCleanup
, guard
);
2609 CGF
.EmitBlock(InitBlock
);
2610 } else if (!D
.isLocalVarDecl()) {
2611 // For non-local variables, store 1 into the first byte of the guard
2612 // variable before the object initialization begins so that references
2613 // to the variable during initialization don't restart initialization.
2614 Builder
.CreateStore(llvm::ConstantInt::get(CGM
.Int8Ty
, 1),
2615 guardAddr
.withElementType(CGM
.Int8Ty
));
2618 // Emit the initializer and add a global destructor if appropriate.
2619 CGF
.EmitCXXGlobalVarDeclInit(D
, var
, shouldPerformInit
);
2622 // Pop the guard-abort cleanup if we pushed one.
2623 CGF
.PopCleanupBlock();
2625 // Call __cxa_guard_release. This cannot throw.
2626 CGF
.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM
, guardPtrTy
),
2627 guardAddr
.getPointer());
2628 } else if (D
.isLocalVarDecl()) {
2629 // For local variables, store 1 into the first byte of the guard variable
2630 // after the object initialization completes so that initialization is
2631 // retried if initialization is interrupted by an exception.
2632 Builder
.CreateStore(llvm::ConstantInt::get(CGM
.Int8Ty
, 1),
2633 guardAddr
.withElementType(CGM
.Int8Ty
));
2636 CGF
.EmitBlock(EndBlock
);
2639 /// Register a global destructor using __cxa_atexit.
//
// Registers `dtor` (called with `addr`) to run at program/thread exit by
// emitting a call to __cxa_atexit — or, for TLS destructors, to _tlv_atexit
// on Darwin / __cxa_thread_atexit elsewhere — passing __dso_handle as the
// home-object token.
//
// NOTE(review): this extraction dropped several original lines (the
// `if (TLS)` guard around the name selection, the else-branch of the
// AddrPtrTy ternary, and the `if (!addr)` guard before the null-value
// assignment). The code below is reproduced byte-identically, gaps included —
// confirm against upstream before relying on control flow.
2640 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction
&CGF
,
2641 llvm::FunctionCallee dtor
,
2642 llvm::Constant
*addr
, bool TLS
) {
// AIX uses the sinit/sterm mechanism instead; reaching here on AIX is a bug.
2643 assert(!CGF
.getTarget().getTriple().isOSAIX() &&
2644 "unexpected call to emitGlobalDtorWithCXAAtExit");
2645 assert((TLS
|| CGF
.getTypes().getCodeGenOpts().CXAAtExit
) &&
2646 "__cxa_atexit is disabled");
2647 const char *Name
= "__cxa_atexit";
// NOTE(review): presumably inside an `if (TLS)` block — guard line missing
// from this extraction.
2649 const llvm::Triple
&T
= CGF
.getTarget().getTriple();
2650 Name
= T
.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2653 // We're assuming that the destructor function is something we can
2654 // reasonably call with the default CC.
2655 llvm::Type
*dtorTy
= llvm::PointerType::getUnqual(CGF
.getLLVMContext());
2657 // Preserve address space of addr.
2658 auto AddrAS
= addr
? addr
->getType()->getPointerAddressSpace() : 0;
// NOTE(review): the else-branch of this ternary (the default pointer type)
// is missing from this extraction.
2659 auto AddrPtrTy
= AddrAS
? llvm::PointerType::get(CGF
.getLLVMContext(), AddrAS
)
2662 // Create a variable that binds the atexit to this shared object.
2663 llvm::Constant
*handle
=
2664 CGF
.CGM
.CreateRuntimeVariable(CGF
.Int8Ty
, "__dso_handle");
2665 auto *GV
= cast
<llvm::GlobalValue
>(handle
->stripPointerCasts());
2666 GV
->setVisibility(llvm::GlobalValue::HiddenVisibility
);
2668 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2669 llvm::Type
*paramTys
[] = {dtorTy
, AddrPtrTy
, handle
->getType()};
2670 llvm::FunctionType
*atexitTy
=
2671 llvm::FunctionType::get(CGF
.IntTy
, paramTys
, false);
2673 // Fetch the actual function.
2674 llvm::FunctionCallee atexit
= CGF
.CGM
.CreateRuntimeFunction(atexitTy
, Name
);
// The runtime registration function itself never throws.
2675 if (llvm::Function
*fn
= dyn_cast
<llvm::Function
>(atexit
.getCallee()))
2676 fn
->setDoesNotThrow();
2679 // addr is null when we are trying to register a dtor annotated with
2680 // __attribute__((destructor)) in a constructor function. Using null here is
2681 // okay because this argument is just passed back to the destructor
// NOTE(review): the `if (!addr)` guard line appears to have been dropped by
// the extraction — confirm before editing.
2683 addr
= llvm::Constant::getNullValue(CGF
.Int8PtrTy
);
2685 llvm::Value
*args
[] = {dtor
.getCallee(), addr
, handle
};
2686 CGF
.EmitNounwindRuntimeCall(atexit
, args
);
// Creates an empty void() function (named FnName) that the caller fills in to
// register or unregister destructors of one priority level.
//
// NOTE(review): the second parameter line (the FnName declaration) is missing
// from this extraction; FnName is referenced below, so it is presumably a
// StringRef parameter — confirm against upstream.
2689 static llvm::Function
*createGlobalInitOrCleanupFn(CodeGen::CodeGenModule
&CGM
,
2691 // Create a function that registers/unregisters destructors that have the same
2693 llvm::FunctionType
*FTy
= llvm::FunctionType::get(CGM
.VoidTy
, false);
2694 llvm::Function
*GlobalInitOrCleanupFn
= CGM
.CreateGlobalInitOrCleanUpFunction(
2695 FTy
, FnName
, CGM
.getTypes().arrangeNullaryFunction(), SourceLocation());
2697 return GlobalInitOrCleanupFn
;
// For each atexit-registered destructor priority level, emits a
// __GLOBAL_cleanup_<priority> function that walks the destructors in reverse
// registration order, calls unatexit() on each, and directly invokes any
// destructor that unatexit() reports (returns 0) as still registered.
//
// NOTE(review): loop-closing braces and the `++itv` increment are missing
// from this extraction; code reproduced byte-identically, gaps included.
2700 void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2701 for (const auto &I
: DtorsUsingAtExit
) {
2702 int Priority
= I
.first
;
2703 std::string GlobalCleanupFnName
=
2704 std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority
);
2706 llvm::Function
*GlobalCleanupFn
=
2707 createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName
);
2709 CodeGenFunction
CGF(*this);
2710 CGF
.StartFunction(GlobalDecl(), getContext().VoidTy
, GlobalCleanupFn
,
2711 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2712 SourceLocation(), SourceLocation());
2713 auto AL
= ApplyDebugLocation::CreateArtificial(CGF
);
2715 // Get the destructor function type, void(*)(void).
2716 llvm::FunctionType
*dtorFuncTy
= llvm::FunctionType::get(CGF
.VoidTy
, false);
2718 // Destructor functions are run/unregistered in non-ascending
2719 // order of their priorities.
2720 const llvm::TinyPtrVector
<llvm::Function
*> &Dtors
= I
.second
;
2721 auto itv
= Dtors
.rbegin();
2722 while (itv
!= Dtors
.rend()) {
2723 llvm::Function
*Dtor
= *itv
;
2725 // We're assuming that the destructor function is something we can
2726 // reasonably call with the correct CC.
2727 llvm::Value
*V
= CGF
.unregisterGlobalDtorWithUnAtExit(Dtor
);
2728 llvm::Value
*NeedsDestruct
=
2729 CGF
.Builder
.CreateIsNull(V
, "needs_destruct");
2731 llvm::BasicBlock
*DestructCallBlock
=
2732 CGF
.createBasicBlock("destruct.call");
// The end block doubles as the entry for the next iteration's unatexit call,
// hence the name depends on whether another destructor follows.
2733 llvm::BasicBlock
*EndBlock
= CGF
.createBasicBlock(
2734 (itv
+ 1) != Dtors
.rend() ? "unatexit.call" : "destruct.end");
2735 // Check if unatexit returns a value of 0. If it does, jump to
2736 // DestructCallBlock, otherwise jump to EndBlock directly.
2737 CGF
.Builder
.CreateCondBr(NeedsDestruct
, DestructCallBlock
, EndBlock
);
2739 CGF
.EmitBlock(DestructCallBlock
);
2741 // Emit the call to casted Dtor.
2742 llvm::CallInst
*CI
= CGF
.Builder
.CreateCall(dtorFuncTy
, Dtor
);
2743 // Make sure the call and the callee agree on calling convention.
2744 CI
->setCallingConv(Dtor
->getCallingConv());
2746 CGF
.EmitBlock(EndBlock
);
2751 CGF
.FinishFunction();
// Registered so the cleanup function itself runs at the right priority.
2752 AddGlobalDtor(GlobalCleanupFn
, Priority
);
// For each destructor priority level, emits a __GLOBAL_init_<priority>
// constructor function that registers the level's destructors via
// __cxa_atexit (when enabled) or atexit, then — for sinit/sterm targets —
// also generates the matching unregistering cleanup functions.
//
// NOTE(review): the `} else {` line between the __cxa_atexit call and the
// registerGlobalDtorWithAtExit fallback is missing from this extraction;
// code reproduced byte-identically, gaps included.
2756 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2757 for (const auto &I
: DtorsUsingAtExit
) {
2758 int Priority
= I
.first
;
2759 std::string GlobalInitFnName
=
2760 std::string("__GLOBAL_init_") + llvm::to_string(Priority
);
2761 llvm::Function
*GlobalInitFn
=
2762 createGlobalInitOrCleanupFn(*this, GlobalInitFnName
);
2764 CodeGenFunction
CGF(*this);
2765 CGF
.StartFunction(GlobalDecl(), getContext().VoidTy
, GlobalInitFn
,
2766 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2767 SourceLocation(), SourceLocation());
2768 auto AL
= ApplyDebugLocation::CreateArtificial(CGF
);
2770 // Since constructor functions are run in non-descending order of their
2771 // priorities, destructors are registered in non-descending order of their
2772 // priorities, and since destructor functions are run in the reverse order
2773 // of their registration, destructor functions are run in non-ascending
2774 // order of their priorities.
2775 const llvm::TinyPtrVector
<llvm::Function
*> &Dtors
= I
.second
;
2776 for (auto *Dtor
: Dtors
) {
2777 // Register the destructor function calling __cxa_atexit if it is
2778 // available. Otherwise fall back on calling atexit.
2779 if (getCodeGenOpts().CXAAtExit
) {
2780 emitGlobalDtorWithCXAAtExit(CGF
, Dtor
, nullptr, false);
2782 // We're assuming that the destructor function is something we can
2783 // reasonably call with the correct CC.
2784 CGF
.registerGlobalDtorWithAtExit(Dtor
);
2788 CGF
.FinishFunction();
// The registration function runs as a constructor at this priority.
2789 AddGlobalCtor(GlobalInitFn
, Priority
);
2792 if (getCXXABI().useSinitAndSterm())
2793 unregisterGlobalDtorsWithUnAtExit();
2796 /// Register a global destructor as best as we know how.
//
// Dispatch order: no-destroy variables are skipped; __cxa_atexit /
// __cxa_thread_atexit when available or when the variable is TLS; an Apple
// kext destructor-entry table; otherwise plain atexit registration.
//
// NOTE(review): the early `return;` after the isNoDestroy check and the
// closing brace of the AppleKext branch are missing from this extraction.
2797 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction
&CGF
, const VarDecl
&D
,
2798 llvm::FunctionCallee dtor
,
2799 llvm::Constant
*addr
) {
2800 if (D
.isNoDestroy(CGM
.getContext()))
2803 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2804 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2805 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2806 // We can always use __cxa_thread_atexit.
2807 if (CGM
.getCodeGenOpts().CXAAtExit
|| D
.getTLSKind())
2808 return emitGlobalDtorWithCXAAtExit(CGF
, dtor
, addr
, D
.getTLSKind());
2810 // In Apple kexts, we want to add a global destructor entry.
2811 // FIXME: shouldn't this be guarded by some variable?
2812 if (CGM
.getLangOpts().AppleKext
) {
2813 // Generate a global destructor entry.
2814 return CGM
.AddCXXDtorEntry(dtor
, addr
);
// Fallback: plain atexit registration.
2817 CGF
.registerGlobalDtorWithAtExit(D
, dtor
, addr
);
2820 static bool isThreadWrapperReplaceable(const VarDecl
*VD
,
2821 CodeGen::CodeGenModule
&CGM
) {
2822 assert(!VD
->isStaticLocal() && "static local VarDecls don't need wrappers!");
2823 // Darwin prefers to have references to thread local variables to go through
2824 // the thread wrapper instead of directly referencing the backing variable.
2825 return VD
->getTLSKind() == VarDecl::TLS_Dynamic
&&
2826 CGM
.getTarget().getTriple().isOSDarwin();
2829 /// Get the appropriate linkage for the wrapper function. This is essentially
2830 /// the weak form of the variable's linkage; every translation unit which needs
2831 /// the wrapper emits a copy, and we want the linker to merge them.
//
// NOTE(review): the `return` statements for the local-linkage case and the
// non-linkonce/non-weak replaceable case are missing from this extraction;
// presumably both return VarLinkage — confirm against upstream.
2832 static llvm::GlobalValue::LinkageTypes
2833 getThreadLocalWrapperLinkage(const VarDecl
*VD
, CodeGen::CodeGenModule
&CGM
) {
2834 llvm::GlobalValue::LinkageTypes VarLinkage
=
2835 CGM
.getLLVMLinkageVarDefinition(VD
);
2837 // For internal linkage variables, we don't need an external or weak wrapper.
2838 if (llvm::GlobalValue::isLocalLinkage(VarLinkage
))
2841 // If the thread wrapper is replaceable, give it appropriate linkage.
2842 if (isThreadWrapperReplaceable(VD
, CGM
))
2843 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage
) &&
2844 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage
))
// Default: weak_odr so copies emitted by multiple TUs merge at link time.
2846 return llvm::GlobalValue::WeakODRLinkage
;
// Returns (creating on first use) the thread_local wrapper function for VD:
// mangles the wrapper name, reuses an existing module-level declaration when
// present, otherwise creates a function returning a pointer to the variable's
// (non-reference) type, sets comdat/visibility/calling convention, and
// records the pair in ThreadWrappers for later emission.
//
// NOTE(review): the return-type line before 2850, the second parameter line
// (2851), and the trailing `return Wrapper;` are missing from this
// extraction; code reproduced byte-identically, gaps included.
2850 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl
*VD
,
2852 // Mangle the name for the thread_local wrapper function.
2853 SmallString
<256> WrapperName
;
2855 llvm::raw_svector_ostream
Out(WrapperName
);
2856 getMangleContext().mangleItaniumThreadLocalWrapper(VD
, Out
);
2859 // FIXME: If VD is a definition, we should regenerate the function attributes
2860 // before returning.
2861 if (llvm::Value
*V
= CGM
.getModule().getNamedValue(WrapperName
))
2862 return cast
<llvm::Function
>(V
);
// The wrapper returns a pointer to the variable; for a reference variable
// that is a pointer to the referenced type.
2864 QualType RetQT
= VD
->getType();
2865 if (RetQT
->isReferenceType())
2866 RetQT
= RetQT
.getNonReferenceType();
2868 const CGFunctionInfo
&FI
= CGM
.getTypes().arrangeBuiltinFunctionDeclaration(
2869 getContext().getPointerType(RetQT
), FunctionArgList());
2871 llvm::FunctionType
*FnTy
= CGM
.getTypes().GetFunctionType(FI
);
2872 llvm::Function
*Wrapper
=
2873 llvm::Function::Create(FnTy
, getThreadLocalWrapperLinkage(VD
, CGM
),
2874 WrapperName
.str(), &CGM
.getModule());
2876 if (CGM
.supportsCOMDAT() && Wrapper
->isWeakForLinker())
2877 Wrapper
->setComdat(CGM
.getModule().getOrInsertComdat(Wrapper
->getName()));
2879 CGM
.SetLLVMFunctionAttributes(GlobalDecl(), FI
, Wrapper
, /*IsThunk=*/false);
2881 // Always resolve references to the wrapper at link time.
2882 if (!Wrapper
->hasLocalLinkage())
2883 if (!isThreadWrapperReplaceable(VD
, CGM
) ||
2884 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper
->getLinkage()) ||
2885 llvm::GlobalVariable::isWeakODRLinkage(Wrapper
->getLinkage()) ||
2886 VD
->getVisibility() == HiddenVisibility
)
2887 Wrapper
->setVisibility(llvm::GlobalValue::HiddenVisibility
);
// Replaceable (Darwin) wrappers use the CXX_FAST_TLS convention and must not
// unwind.
2889 if (isThreadWrapperReplaceable(VD
, CGM
)) {
2890 Wrapper
->setCallingConv(llvm::CallingConv::CXX_FAST_TLS
);
2891 Wrapper
->addFnAttr(llvm::Attribute::NoUnwind
);
2894 ThreadWrappers
.push_back({VD
, Wrapper
});
// Emits, for an entire module: (1) a guarded __tls_init function running all
// ordered thread_local initializers, (2) thread-wrapper declarations for all
// non-discardable thread_local definitions, and (3) the bodies of every
// referenced thread wrapper, each calling the per-variable init routine (or
// nothing, for constant-initialized variables) and returning the variable's
// address (loaded through the variable for references).
//
// NOTE(review): this extraction dropped many original lines (closing braces,
// `else` keywords, trailing call arguments such as the __tls_init location
// and the GlobalAlias aliasee, and several `return`s). The code below is
// reproduced byte-identically, gaps included — do not restructure without
// consulting upstream.
2898 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2899 CodeGenModule
&CGM
, ArrayRef
<const VarDecl
*> CXXThreadLocals
,
2900 ArrayRef
<llvm::Function
*> CXXThreadLocalInits
,
2901 ArrayRef
<const VarDecl
*> CXXThreadLocalInitVars
) {
2902 llvm::Function
*InitFunc
= nullptr;
2904 // Separate initializers into those with ordered (or partially-ordered)
2905 // initialization and those with unordered initialization.
2906 llvm::SmallVector
<llvm::Function
*, 8> OrderedInits
;
2907 llvm::SmallDenseMap
<const VarDecl
*, llvm::Function
*> UnorderedInits
;
2908 for (unsigned I
= 0; I
!= CXXThreadLocalInits
.size(); ++I
) {
// Template instantiations are unordered (keyed by canonical decl); all other
// initializers run in declaration order.
2909 if (isTemplateInstantiation(
2910 CXXThreadLocalInitVars
[I
]->getTemplateSpecializationKind()))
2911 UnorderedInits
[CXXThreadLocalInitVars
[I
]->getCanonicalDecl()] =
2912 CXXThreadLocalInits
[I
];
2914 OrderedInits
.push_back(CXXThreadLocalInits
[I
]);
2917 if (!OrderedInits
.empty()) {
2918 // Generate a guarded initialization function.
2919 llvm::FunctionType
*FTy
=
2920 llvm::FunctionType::get(CGM
.VoidTy
, /*isVarArg=*/false);
2921 const CGFunctionInfo
&FI
= CGM
.getTypes().arrangeNullaryFunction();
2922 InitFunc
= CGM
.CreateGlobalInitOrCleanUpFunction(FTy
, "__tls_init", FI
,
// __tls_guard is a one-byte, thread-local, internal flag that makes
// __tls_init idempotent per thread.
2925 llvm::GlobalVariable
*Guard
= new llvm::GlobalVariable(
2926 CGM
.getModule(), CGM
.Int8Ty
, /*isConstant=*/false,
2927 llvm::GlobalVariable::InternalLinkage
,
2928 llvm::ConstantInt::get(CGM
.Int8Ty
, 0), "__tls_guard");
2929 Guard
->setThreadLocal(true);
2930 Guard
->setThreadLocalMode(CGM
.GetDefaultLLVMTLSModel());
2932 CharUnits GuardAlign
= CharUnits::One();
2933 Guard
->setAlignment(GuardAlign
.getAsAlign());
2935 CodeGenFunction(CGM
).GenerateCXXGlobalInitFunc(
2936 InitFunc
, OrderedInits
, ConstantAddress(Guard
, CGM
.Int8Ty
, GuardAlign
));
2937 // On Darwin platforms, use CXX_FAST_TLS calling convention.
2938 if (CGM
.getTarget().getTriple().isOSDarwin()) {
2939 InitFunc
->setCallingConv(llvm::CallingConv::CXX_FAST_TLS
);
2940 InitFunc
->addFnAttr(llvm::Attribute::NoUnwind
);
2944 // Create declarations for thread wrappers for all thread-local variables
2945 // with non-discardable definitions in this translation unit.
2946 for (const VarDecl
*VD
: CXXThreadLocals
) {
2947 if (VD
->hasDefinition() &&
2948 !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD
))) {
2949 llvm::GlobalValue
*GV
= CGM
.GetGlobalValue(CGM
.getMangledName(VD
));
2950 getOrCreateThreadLocalWrapper(VD
, GV
);
2954 // Emit all referenced thread wrappers.
2955 for (auto VDAndWrapper
: ThreadWrappers
) {
2956 const VarDecl
*VD
= VDAndWrapper
.first
;
2957 llvm::GlobalVariable
*Var
=
2958 cast
<llvm::GlobalVariable
>(CGM
.GetGlobalValue(CGM
.getMangledName(VD
)));
2959 llvm::Function
*Wrapper
= VDAndWrapper
.second
;
2961 // Some targets require that all access to thread local variables go through
2962 // the thread wrapper. This means that we cannot attempt to create a thread
2963 // wrapper or a thread helper.
2964 if (!VD
->hasDefinition()) {
2965 if (isThreadWrapperReplaceable(VD
, CGM
)) {
2966 Wrapper
->setLinkage(llvm::Function::ExternalLinkage
);
2970 // If this isn't a TU in which this variable is defined, the thread
2971 // wrapper is discardable.
2972 if (Wrapper
->getLinkage() == llvm::Function::WeakODRLinkage
)
2973 Wrapper
->setLinkage(llvm::Function::LinkOnceODRLinkage
);
2976 CGM
.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper
);
2978 // Mangle the name for the thread_local initialization function.
2979 SmallString
<256> InitFnName
;
2981 llvm::raw_svector_ostream
Out(InitFnName
);
2982 getMangleContext().mangleItaniumThreadLocalInit(VD
, Out
);
2985 llvm::FunctionType
*InitFnTy
= llvm::FunctionType::get(CGM
.VoidTy
, false);
2987 // If we have a definition for the variable, emit the initialization
2988 // function as an alias to the global Init function (if any). Otherwise,
2989 // produce a declaration of the initialization function.
2990 llvm::GlobalValue
*Init
= nullptr;
2991 bool InitIsInitFunc
= false;
2992 bool HasConstantInitialization
= false;
2993 if (!usesThreadWrapperFunction(VD
)) {
2994 HasConstantInitialization
= true;
2995 } else if (VD
->hasDefinition()) {
2996 InitIsInitFunc
= true;
2997 llvm::Function
*InitFuncToUse
= InitFunc
;
2998 if (isTemplateInstantiation(VD
->getTemplateSpecializationKind()))
2999 InitFuncToUse
= UnorderedInits
.lookup(VD
->getCanonicalDecl());
// NOTE(review): the remaining GlobalAlias::create arguments (aliasee etc.)
// are missing from this extraction.
3001 Init
= llvm::GlobalAlias::create(Var
->getLinkage(), InitFnName
.str(),
3004 // Emit a weak global function referring to the initialization function.
3005 // This function will not exist if the TU defining the thread_local
3006 // variable in question does not need any dynamic initialization for
3007 // its thread_local variables.
3008 Init
= llvm::Function::Create(InitFnTy
,
3009 llvm::GlobalVariable::ExternalWeakLinkage
,
3010 InitFnName
.str(), &CGM
.getModule());
3011 const CGFunctionInfo
&FI
= CGM
.getTypes().arrangeNullaryFunction();
3012 CGM
.SetLLVMFunctionAttributes(
3013 GlobalDecl(), FI
, cast
<llvm::Function
>(Init
), /*IsThunk=*/false);
3017 Init
->setVisibility(Var
->getVisibility());
3018 // Don't mark an extern_weak function DSO local on windows.
3019 if (!CGM
.getTriple().isOSWindows() || !Init
->hasExternalWeakLinkage())
3020 Init
->setDSOLocal(Var
->isDSOLocal());
3023 llvm::LLVMContext
&Context
= CGM
.getModule().getContext();
3025 // The linker on AIX is not happy with missing weak symbols. However,
3026 // other TUs will not know whether the initialization routine exists
3027 // so create an empty, init function to satisfy the linker.
3028 // This is needed whenever a thread wrapper function is not used, and
3029 // also when the symbol is weak.
3030 if (CGM
.getTriple().isOSAIX() && VD
->hasDefinition() &&
3031 isEmittedWithConstantInitializer(VD
, true) &&
3032 !mayNeedDestruction(VD
)) {
3033 // Init should be null. If it were non-null, then the logic above would
3034 // either be defining the function to be an alias or declaring the
3035 // function with the expectation that the definition of the variable
3037 assert(Init
== nullptr && "Expected Init to be null.");
3039 llvm::Function
*Func
= llvm::Function::Create(
3040 InitFnTy
, Var
->getLinkage(), InitFnName
.str(), &CGM
.getModule());
3041 const CGFunctionInfo
&FI
= CGM
.getTypes().arrangeNullaryFunction();
3042 CGM
.SetLLVMFunctionAttributes(GlobalDecl(), FI
,
3043 cast
<llvm::Function
>(Func
),
3045 // Create a function body that just returns
3046 llvm::BasicBlock
*Entry
= llvm::BasicBlock::Create(Context
, "", Func
);
3047 CGBuilderTy
Builder(CGM
, Entry
);
3048 Builder
.CreateRetVoid();
// Build the wrapper body itself.
3051 llvm::BasicBlock
*Entry
= llvm::BasicBlock::Create(Context
, "", Wrapper
);
3052 CGBuilderTy
Builder(CGM
, Entry
);
3053 if (HasConstantInitialization
) {
3054 // No dynamic initialization to invoke.
3055 } else if (InitIsInitFunc
) {
3057 llvm::CallInst
*CallVal
= Builder
.CreateCall(InitFnTy
, Init
);
3058 if (isThreadWrapperReplaceable(VD
, CGM
)) {
3059 CallVal
->setCallingConv(llvm::CallingConv::CXX_FAST_TLS
);
3060 llvm::Function
*Fn
=
3061 cast
<llvm::Function
>(cast
<llvm::GlobalAlias
>(Init
)->getAliasee());
3062 Fn
->setCallingConv(llvm::CallingConv::CXX_FAST_TLS
);
3065 } else if (CGM
.getTriple().isOSAIX()) {
3066 // On AIX, except if constinit and also neither of class type or of
3067 // (possibly multi-dimensional) array of class type, thread_local vars
3068 // will have init routines regardless of whether they are
3069 // const-initialized. Since the routine is guaranteed to exist, we can
3070 // unconditionally call it without testing for its existance. This
3071 // avoids potentially unresolved weak symbols which the AIX linker
3072 // isn't happy with.
3073 Builder
.CreateCall(InitFnTy
, Init
);
3075 // Don't know whether we have an init function. Call it if it exists.
3076 llvm::Value
*Have
= Builder
.CreateIsNotNull(Init
);
3077 llvm::BasicBlock
*InitBB
= llvm::BasicBlock::Create(Context
, "", Wrapper
);
3078 llvm::BasicBlock
*ExitBB
= llvm::BasicBlock::Create(Context
, "", Wrapper
);
3079 Builder
.CreateCondBr(Have
, InitBB
, ExitBB
);
3081 Builder
.SetInsertPoint(InitBB
);
3082 Builder
.CreateCall(InitFnTy
, Init
);
3083 Builder
.CreateBr(ExitBB
);
3085 Builder
.SetInsertPoint(ExitBB
);
3088 // For a reference, the result of the wrapper function is a pointer to
3089 // the referenced object.
3090 llvm::Value
*Val
= Builder
.CreateThreadLocalAddress(Var
);
3092 if (VD
->getType()->isReferenceType()) {
3093 CharUnits Align
= CGM
.getContext().getDeclAlign(VD
);
3094 Val
= Builder
.CreateAlignedLoad(Var
->getValueType(), Val
, Align
);
3096 if (Val
->getType() != Wrapper
->getReturnType())
3097 Val
= Builder
.CreatePointerBitCastOrAddrSpaceCast(
3098 Val
, Wrapper
->getReturnType(), "");
3100 Builder
.CreateRet(Val
);
// Forms an lvalue for a use of the thread_local variable VD by calling its
// thread wrapper function (which returns the variable's address) and wrapping
// the result in an LValue with the declaration's alignment (natural alignment
// for references).
//
// NOTE(review): the VD parameter line, the `LValue LV;` declaration, the
// `else` keyword, and the trailing `return LV;` are missing from this
// extraction; code reproduced byte-identically, gaps included.
3104 LValue
ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction
&CGF
,
3106 QualType LValType
) {
3107 llvm::Value
*Val
= CGF
.CGM
.GetAddrOfGlobalVar(VD
);
3108 llvm::Function
*Wrapper
= getOrCreateThreadLocalWrapper(VD
, Val
);
3110 llvm::CallInst
*CallVal
= CGF
.Builder
.CreateCall(Wrapper
);
// The call must match the wrapper's (possibly CXX_FAST_TLS) convention.
3111 CallVal
->setCallingConv(Wrapper
->getCallingConv());
3114 if (VD
->getType()->isReferenceType())
3115 LV
= CGF
.MakeNaturalAlignAddrLValue(CallVal
, LValType
);
3117 LV
= CGF
.MakeAddrLValue(CallVal
, LValType
,
3118 CGF
.getContext().getDeclAlign(VD
));
3119 // FIXME: need setObjCGCLValueClass?
3123 /// Return whether the given global decl needs a VTT parameter, which it does
3124 /// if it's a base constructor or destructor with virtual bases.
//
// NOTE(review): all four `return` statements (false / true / true / false)
// are missing from this extraction; only the conditions remain. Code
// reproduced byte-identically.
3125 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD
) {
3126 const CXXMethodDecl
*MD
= cast
<CXXMethodDecl
>(GD
.getDecl());
3128 // We don't have any virtual bases, just return early.
3129 if (!MD
->getParent()->getNumVBases())
3132 // Check if we have a base constructor.
3133 if (isa
<CXXConstructorDecl
>(MD
) && GD
.getCtorType() == Ctor_Base
)
3136 // Check if we have a base destructor.
3137 if (isa
<CXXDestructorDecl
>(MD
) && GD
.getDtorType() == Dtor_Base
)
// Builder for Itanium-ABI RTTI (std::type_info-derived) descriptors: collects
// the descriptor's constant fields and provides Build* helpers for each
// __*_type_info variant defined by the ABI (section 2.9.5).
//
// NOTE(review): several enum openings (e.g. `enum { ... }`), the flag values
// for PTI_Const/PTI_Volatile/PTI_Restrict and the BCTI_* flags, the `public:`
// access specifier, and the closing `};` are missing from this extraction;
// declaration reproduced byte-identically, gaps included.
3144 class ItaniumRTTIBuilder
{
3145 CodeGenModule
&CGM
; // Per-module state.
3146 llvm::LLVMContext
&VMContext
;
3147 const ItaniumCXXABI
&CXXABI
; // Per-module state.
3149 /// Fields - The fields of the RTTI descriptor currently being built.
3150 SmallVector
<llvm::Constant
*, 16> Fields
;
3152 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3153 llvm::GlobalVariable
*
3154 GetAddrOfTypeName(QualType Ty
, llvm::GlobalVariable::LinkageTypes Linkage
);
3156 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3157 /// descriptor of the given type.
3158 llvm::Constant
*GetAddrOfExternalRTTIDescriptor(QualType Ty
);
3160 /// BuildVTablePointer - Build the vtable pointer for the given type.
3161 void BuildVTablePointer(const Type
*Ty
);
3163 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3164 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3165 void BuildSIClassTypeInfo(const CXXRecordDecl
*RD
);
3167 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3168 /// classes with bases that do not satisfy the abi::__si_class_type_info
3169 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
3170 void BuildVMIClassTypeInfo(const CXXRecordDecl
*RD
);
3172 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3173 /// for pointer types.
3174 void BuildPointerTypeInfo(QualType PointeeTy
);
3176 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3177 /// type_info for an object type.
3178 void BuildObjCObjectTypeInfo(const ObjCObjectType
*Ty
);
3180 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3181 /// struct, used for member pointer types.
3182 void BuildPointerToMemberTypeInfo(const MemberPointerType
*Ty
);
3185 ItaniumRTTIBuilder(const ItaniumCXXABI
&ABI
)
3186 : CGM(ABI
.CGM
), VMContext(CGM
.getModule().getContext()), CXXABI(ABI
) {}
3188 // Pointer type info flags.
3190 /// PTI_Const - Type has const qualifier.
3193 /// PTI_Volatile - Type has volatile qualifier.
3196 /// PTI_Restrict - Type has restrict qualifier.
3199 /// PTI_Incomplete - Type is incomplete.
3200 PTI_Incomplete
= 0x8,
3202 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3203 /// (in pointer to member).
3204 PTI_ContainingClassIncomplete
= 0x10,
3206 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3207 //PTI_TransactionSafe = 0x20,
3209 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3210 PTI_Noexcept
= 0x40,
3213 // VMI type info flags.
3215 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3216 VMI_NonDiamondRepeat
= 0x1,
3218 /// VMI_DiamondShaped - Class is diamond shaped.
3219 VMI_DiamondShaped
= 0x2
3222 // Base class type info flags.
3224 /// BCTI_Virtual - Base class is virtual.
3227 /// BCTI_Public - Base class is public.
3231 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3232 /// link to an existing RTTI descriptor if one already exists.
3233 llvm::Constant
*BuildTypeInfo(QualType Ty
);
3235 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3236 llvm::Constant
*BuildTypeInfo(
3238 llvm::GlobalVariable::LinkageTypes Linkage
,
3239 llvm::GlobalValue::VisibilityTypes Visibility
,
3240 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass
);
// Emits (or replaces) the _ZTS* global holding the mangled type-name string
// for Ty, with the requested linkage and char alignment, and returns it.
//
// NOTE(review): the ConstantDataArray::getString string argument (the
// `Name.substr(4)` payload referred to by the comment below) and the trailing
// `return GV;` are missing from this extraction.
3244 llvm::GlobalVariable
*ItaniumRTTIBuilder::GetAddrOfTypeName(
3245 QualType Ty
, llvm::GlobalVariable::LinkageTypes Linkage
) {
3246 SmallString
<256> Name
;
3247 llvm::raw_svector_ostream
Out(Name
);
3248 CGM
.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty
, Out
);
3250 // We know that the mangled name of the type starts at index 4 of the
3251 // mangled name of the typename, so we can just index into it in order to
3252 // get the mangled name of the type.
3253 llvm::Constant
*Init
= llvm::ConstantDataArray::getString(VMContext
,
3255 auto Align
= CGM
.getContext().getTypeAlignInChars(CGM
.getContext().CharTy
);
3257 llvm::GlobalVariable
*GV
= CGM
.CreateOrReplaceCXXRuntimeVariable(
3258 Name
, Init
->getType(), Linkage
, Align
.getAsAlign());
3260 GV
->setInitializer(Init
);
// Returns a reference to the externally-defined _ZTI* RTTI descriptor for Ty,
// declaring an external global for it on first use and applying DLL
// import/DSO-local properties for PS4-style targets.
//
// NOTE(review): the return-type line preceding 3266, the early return when an
// existing global is found, the closing braces, and the final `return GV;`
// are missing from this extraction; code reproduced byte-identically.
3266 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty
) {
3267 // Mangle the RTTI name.
3268 SmallString
<256> Name
;
3269 llvm::raw_svector_ostream
Out(Name
);
3270 CGM
.getCXXABI().getMangleContext().mangleCXXRTTI(Ty
, Out
);
3272 // Look for an existing global.
3273 llvm::GlobalVariable
*GV
= CGM
.getModule().getNamedGlobal(Name
);
3276 // Create a new global variable.
3277 // Note for the future: If we would ever like to do deferred emission of
3278 // RTTI, check if emitting vtables opportunistically need any adjustment.
3280 GV
= new llvm::GlobalVariable(
3281 CGM
.getModule(), CGM
.GlobalsInt8PtrTy
,
3282 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage
, nullptr, Name
);
3283 const CXXRecordDecl
*RD
= Ty
->getAsCXXRecordDecl();
3284 CGM
.setGVProperties(GV
, RD
);
3285 // Import the typeinfo symbol when all non-inline virtual methods are
3287 if (CGM
.getTarget().hasPS4DLLImportExport()) {
3288 if (RD
&& CXXRecordAllNonInlineVirtualsHaveAttr
<DLLImportAttr
>(RD
)) {
3289 GV
->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass
);
3290 CGM
.setDSOLocal(GV
);
3298 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3299 /// info for that type is defined in the standard library.
//
// NOTE(review): the `return true;` / `return false;` lines that terminate
// each case group of the switch are missing from this extraction; only the
// case labels and the llvm_unreachable diagnostics remain. Code reproduced
// byte-identically, gaps included.
3300 static bool TypeInfoIsInStandardLibrary(const BuiltinType
*Ty
) {
3301 // Itanium C++ ABI 2.9.2:
3302 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3303 // the run-time support library. Specifically, the run-time support
3304 // library should contain type_info objects for the types X, X* and
3305 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3306 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3307 // long, unsigned long, long long, unsigned long long, float, double,
3308 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3309 // half-precision floating point types.
3311 // GCC also emits RTTI for __int128.
3312 // FIXME: We do not emit RTTI information for decimal types here.
3314 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3315 switch (Ty
->getKind()) {
// Fundamental types whose type_info the runtime library provides.
3316 case BuiltinType::Void
:
3317 case BuiltinType::NullPtr
:
3318 case BuiltinType::Bool
:
3319 case BuiltinType::WChar_S
:
3320 case BuiltinType::WChar_U
:
3321 case BuiltinType::Char_U
:
3322 case BuiltinType::Char_S
:
3323 case BuiltinType::UChar
:
3324 case BuiltinType::SChar
:
3325 case BuiltinType::Short
:
3326 case BuiltinType::UShort
:
3327 case BuiltinType::Int
:
3328 case BuiltinType::UInt
:
3329 case BuiltinType::Long
:
3330 case BuiltinType::ULong
:
3331 case BuiltinType::LongLong
:
3332 case BuiltinType::ULongLong
:
3333 case BuiltinType::Half
:
3334 case BuiltinType::Float
:
3335 case BuiltinType::Double
:
3336 case BuiltinType::LongDouble
:
3337 case BuiltinType::Float16
:
3338 case BuiltinType::Float128
:
3339 case BuiltinType::Ibm128
:
3340 case BuiltinType::Char8
:
3341 case BuiltinType::Char16
:
3342 case BuiltinType::Char32
:
3343 case BuiltinType::Int128
:
3344 case BuiltinType::UInt128
:
// Target-extension builtins (OpenCL, SVE, PPC vectors, RISC-V vectors, wasm
// references) and fixed-point types: not provided by the runtime library.
3347 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3348 case BuiltinType::Id:
3349 #include "clang/Basic/OpenCLImageTypes.def"
3350 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3351 case BuiltinType::Id:
3352 #include "clang/Basic/OpenCLExtensionTypes.def"
3353 case BuiltinType::OCLSampler
:
3354 case BuiltinType::OCLEvent
:
3355 case BuiltinType::OCLClkEvent
:
3356 case BuiltinType::OCLQueue
:
3357 case BuiltinType::OCLReserveID
:
3358 #define SVE_TYPE(Name, Id, SingletonId) \
3359 case BuiltinType::Id:
3360 #include "clang/Basic/AArch64SVEACLETypes.def"
3361 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3362 case BuiltinType::Id:
3363 #include "clang/Basic/PPCTypes.def"
3364 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3365 #include "clang/Basic/RISCVVTypes.def"
3366 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3367 #include "clang/Basic/WebAssemblyReferenceTypes.def"
3368 case BuiltinType::ShortAccum
:
3369 case BuiltinType::Accum
:
3370 case BuiltinType::LongAccum
:
3371 case BuiltinType::UShortAccum
:
3372 case BuiltinType::UAccum
:
3373 case BuiltinType::ULongAccum
:
3374 case BuiltinType::ShortFract
:
3375 case BuiltinType::Fract
:
3376 case BuiltinType::LongFract
:
3377 case BuiltinType::UShortFract
:
3378 case BuiltinType::UFract
:
3379 case BuiltinType::ULongFract
:
3380 case BuiltinType::SatShortAccum
:
3381 case BuiltinType::SatAccum
:
3382 case BuiltinType::SatLongAccum
:
3383 case BuiltinType::SatUShortAccum
:
3384 case BuiltinType::SatUAccum
:
3385 case BuiltinType::SatULongAccum
:
3386 case BuiltinType::SatShortFract
:
3387 case BuiltinType::SatFract
:
3388 case BuiltinType::SatLongFract
:
3389 case BuiltinType::SatUShortFract
:
3390 case BuiltinType::SatUFract
:
3391 case BuiltinType::SatULongFract
:
3392 case BuiltinType::BFloat16
:
// Placeholder/dependent types must never reach RTTI emission.
3395 case BuiltinType::Dependent
:
3396 #define BUILTIN_TYPE(Id, SingletonId)
3397 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3398 case BuiltinType::Id:
3399 #include "clang/AST/BuiltinTypes.def"
3400 llvm_unreachable("asking for RRTI for a placeholder type!");
3402 case BuiltinType::ObjCId
:
3403 case BuiltinType::ObjCClass
:
3404 case BuiltinType::ObjCSel
:
3405 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3408 llvm_unreachable("Invalid BuiltinType Kind!");
3411 static bool TypeInfoIsInStandardLibrary(const PointerType
*PointerTy
) {
3412 QualType PointeeTy
= PointerTy
->getPointeeType();
3413 const BuiltinType
*BuiltinTy
= dyn_cast
<BuiltinType
>(PointeeTy
);
3417 // Check the qualifiers.
3418 Qualifiers Quals
= PointeeTy
.getQualifiers();
3419 Quals
.removeConst();
3424 return TypeInfoIsInStandardLibrary(BuiltinTy
);
3427 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3428 /// information for the given type exists in the standard library.
3429 static bool IsStandardLibraryRTTIDescriptor(QualType Ty
) {
3430 // Type info for builtin types is defined in the standard library.
3431 if (const BuiltinType
*BuiltinTy
= dyn_cast
<BuiltinType
>(Ty
))
3432 return TypeInfoIsInStandardLibrary(BuiltinTy
);
3434 // Type info for some pointer types to builtin types is defined in the
3435 // standard library.
3436 if (const PointerType
*PointerTy
= dyn_cast
<PointerType
>(Ty
))
3437 return TypeInfoIsInStandardLibrary(PointerTy
);
3442 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3443 /// the given type exists somewhere else, and that we should not emit the type
3444 /// information in this translation unit. Assumes that it is not a
3445 /// standard-library type.
3446 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule
&CGM
,
3448 ASTContext
&Context
= CGM
.getContext();
3450 // If RTTI is disabled, assume it might be disabled in the
3451 // translation unit that defines any potential key function, too.
3452 if (!Context
.getLangOpts().RTTI
) return false;
3454 if (const RecordType
*RecordTy
= dyn_cast
<RecordType
>(Ty
)) {
3455 const CXXRecordDecl
*RD
= cast
<CXXRecordDecl
>(RecordTy
->getDecl());
3456 if (!RD
->hasDefinition())
3459 if (!RD
->isDynamicClass())
3462 // FIXME: this may need to be reconsidered if the key function
3464 // N.B. We must always emit the RTTI data ourselves if there exists a key
3466 bool IsDLLImport
= RD
->hasAttr
<DLLImportAttr
>();
3468 // Don't import the RTTI but emit it locally.
3469 if (CGM
.getTriple().isWindowsGNUEnvironment())
3472 if (CGM
.getVTables().isVTableExternal(RD
)) {
3473 if (CGM
.getTarget().hasPS4DLLImportExport())
3476 return IsDLLImport
&& !CGM
.getTriple().isWindowsItaniumEnvironment()
3487 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3488 static bool IsIncompleteClassType(const RecordType
*RecordTy
) {
3489 return !RecordTy
->getDecl()->isCompleteDefinition();
3492 /// ContainsIncompleteClassType - Returns whether the given type contains an
3493 /// incomplete class type. This is true if
3495 /// * The given type is an incomplete class type.
3496 /// * The given type is a pointer type whose pointee type contains an
3497 /// incomplete class type.
3498 /// * The given type is a member pointer type whose class is an incomplete
3500 /// * The given type is a member pointer type whoise pointee type contains an
3501 /// incomplete class type.
3502 /// is an indirect or direct pointer to an incomplete class type.
3503 static bool ContainsIncompleteClassType(QualType Ty
) {
3504 if (const RecordType
*RecordTy
= dyn_cast
<RecordType
>(Ty
)) {
3505 if (IsIncompleteClassType(RecordTy
))
3509 if (const PointerType
*PointerTy
= dyn_cast
<PointerType
>(Ty
))
3510 return ContainsIncompleteClassType(PointerTy
->getPointeeType());
3512 if (const MemberPointerType
*MemberPointerTy
=
3513 dyn_cast
<MemberPointerType
>(Ty
)) {
3514 // Check if the class type is incomplete.
3515 const RecordType
*ClassType
= cast
<RecordType
>(MemberPointerTy
->getClass());
3516 if (IsIncompleteClassType(ClassType
))
3519 return ContainsIncompleteClassType(MemberPointerTy
->getPointeeType());
3525 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3526 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3527 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3528 static bool CanUseSingleInheritance(const CXXRecordDecl
*RD
) {
3529 // Check the number of bases.
3530 if (RD
->getNumBases() != 1)
3534 CXXRecordDecl::base_class_const_iterator Base
= RD
->bases_begin();
3536 // Check that the base is not virtual.
3537 if (Base
->isVirtual())
3540 // Check that the base is public.
3541 if (Base
->getAccessSpecifier() != AS_public
)
3544 // Check that the class is dynamic iff the base is.
3546 cast
<CXXRecordDecl
>(Base
->getType()->castAs
<RecordType
>()->getDecl());
3547 if (!BaseDecl
->isEmpty() &&
3548 BaseDecl
->isDynamicClass() != RD
->isDynamicClass())
3554 void ItaniumRTTIBuilder::BuildVTablePointer(const Type
*Ty
) {
3555 // abi::__class_type_info.
3556 static const char * const ClassTypeInfo
=
3557 "_ZTVN10__cxxabiv117__class_type_infoE";
3558 // abi::__si_class_type_info.
3559 static const char * const SIClassTypeInfo
=
3560 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3561 // abi::__vmi_class_type_info.
3562 static const char * const VMIClassTypeInfo
=
3563 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3565 const char *VTableName
= nullptr;
3567 switch (Ty
->getTypeClass()) {
3568 #define TYPE(Class, Base)
3569 #define ABSTRACT_TYPE(Class, Base)
3570 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3571 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3572 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3573 #include "clang/AST/TypeNodes.inc"
3574 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3576 case Type::LValueReference
:
3577 case Type::RValueReference
:
3578 llvm_unreachable("References shouldn't get here");
3581 case Type::DeducedTemplateSpecialization
:
3582 llvm_unreachable("Undeduced type shouldn't get here");
3585 llvm_unreachable("Pipe types shouldn't get here");
3589 // GCC treats vector and complex types as fundamental types.
3591 case Type::ExtVector
:
3592 case Type::ConstantMatrix
:
3595 // FIXME: GCC treats block pointers as fundamental types?!
3596 case Type::BlockPointer
:
3597 // abi::__fundamental_type_info.
3598 VTableName
= "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3601 case Type::ConstantArray
:
3602 case Type::IncompleteArray
:
3603 case Type::VariableArray
:
3604 // abi::__array_type_info.
3605 VTableName
= "_ZTVN10__cxxabiv117__array_type_infoE";
3608 case Type::FunctionNoProto
:
3609 case Type::FunctionProto
:
3610 // abi::__function_type_info.
3611 VTableName
= "_ZTVN10__cxxabiv120__function_type_infoE";
3615 // abi::__enum_type_info.
3616 VTableName
= "_ZTVN10__cxxabiv116__enum_type_infoE";
3619 case Type::Record
: {
3620 const CXXRecordDecl
*RD
=
3621 cast
<CXXRecordDecl
>(cast
<RecordType
>(Ty
)->getDecl());
3623 if (!RD
->hasDefinition() || !RD
->getNumBases()) {
3624 VTableName
= ClassTypeInfo
;
3625 } else if (CanUseSingleInheritance(RD
)) {
3626 VTableName
= SIClassTypeInfo
;
3628 VTableName
= VMIClassTypeInfo
;
3634 case Type::ObjCObject
:
3635 // Ignore protocol qualifiers.
3636 Ty
= cast
<ObjCObjectType
>(Ty
)->getBaseType().getTypePtr();
3638 // Handle id and Class.
3639 if (isa
<BuiltinType
>(Ty
)) {
3640 VTableName
= ClassTypeInfo
;
3644 assert(isa
<ObjCInterfaceType
>(Ty
));
3647 case Type::ObjCInterface
:
3648 if (cast
<ObjCInterfaceType
>(Ty
)->getDecl()->getSuperClass()) {
3649 VTableName
= SIClassTypeInfo
;
3651 VTableName
= ClassTypeInfo
;
3655 case Type::ObjCObjectPointer
:
3657 // abi::__pointer_type_info.
3658 VTableName
= "_ZTVN10__cxxabiv119__pointer_type_infoE";
3661 case Type::MemberPointer
:
3662 // abi::__pointer_to_member_type_info.
3663 VTableName
= "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3667 llvm::Constant
*VTable
= nullptr;
3669 // Check if the alias exists. If it doesn't, then get or create the global.
3670 if (CGM
.getItaniumVTableContext().isRelativeLayout())
3671 VTable
= CGM
.getModule().getNamedAlias(VTableName
);
3674 CGM
.getModule().getOrInsertGlobal(VTableName
, CGM
.GlobalsInt8PtrTy
);
3676 CGM
.setDSOLocal(cast
<llvm::GlobalValue
>(VTable
->stripPointerCasts()));
3678 llvm::Type
*PtrDiffTy
=
3679 CGM
.getTypes().ConvertType(CGM
.getContext().getPointerDiffType());
3681 // The vtable address point is 2.
3682 if (CGM
.getItaniumVTableContext().isRelativeLayout()) {
3683 // The vtable address point is 8 bytes after its start:
3684 // 4 for the offset to top + 4 for the relative offset to rtti.
3685 llvm::Constant
*Eight
= llvm::ConstantInt::get(CGM
.Int32Ty
, 8);
3687 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM
.Int8Ty
, VTable
, Eight
);
3689 llvm::Constant
*Two
= llvm::ConstantInt::get(PtrDiffTy
, 2);
3690 VTable
= llvm::ConstantExpr::getInBoundsGetElementPtr(CGM
.GlobalsInt8PtrTy
,
3694 Fields
.push_back(VTable
);
3697 /// Return the linkage that the type info and type info name constants
3698 /// should have for the given type.
3699 static llvm::GlobalVariable::LinkageTypes
getTypeInfoLinkage(CodeGenModule
&CGM
,
3701 // Itanium C++ ABI 2.9.5p7:
3702 // In addition, it and all of the intermediate abi::__pointer_type_info
3703 // structs in the chain down to the abi::__class_type_info for the
3704 // incomplete class type must be prevented from resolving to the
3705 // corresponding type_info structs for the complete class type, possibly
3706 // by making them local static objects. Finally, a dummy class RTTI is
3707 // generated for the incomplete type that will not resolve to the final
3708 // complete class RTTI (because the latter need not exist), possibly by
3709 // making it a local static object.
3710 if (ContainsIncompleteClassType(Ty
))
3711 return llvm::GlobalValue::InternalLinkage
;
3713 switch (Ty
->getLinkage()) {
3715 case InternalLinkage
:
3716 case UniqueExternalLinkage
:
3717 return llvm::GlobalValue::InternalLinkage
;
3719 case VisibleNoLinkage
:
3721 case ExternalLinkage
:
3722 // RTTI is not enabled, which means that this type info struct is going
3723 // to be used for exception handling. Give it linkonce_odr linkage.
3724 if (!CGM
.getLangOpts().RTTI
)
3725 return llvm::GlobalValue::LinkOnceODRLinkage
;
3727 if (const RecordType
*Record
= dyn_cast
<RecordType
>(Ty
)) {
3728 const CXXRecordDecl
*RD
= cast
<CXXRecordDecl
>(Record
->getDecl());
3729 if (RD
->hasAttr
<WeakAttr
>())
3730 return llvm::GlobalValue::WeakODRLinkage
;
3731 if (CGM
.getTriple().isWindowsItaniumEnvironment())
3732 if (RD
->hasAttr
<DLLImportAttr
>() &&
3733 ShouldUseExternalRTTIDescriptor(CGM
, Ty
))
3734 return llvm::GlobalValue::ExternalLinkage
;
3735 // MinGW always uses LinkOnceODRLinkage for type info.
3736 if (RD
->isDynamicClass() &&
3740 .isWindowsGNUEnvironment())
3741 return CGM
.getVTableLinkage(RD
);
3744 return llvm::GlobalValue::LinkOnceODRLinkage
;
3747 llvm_unreachable("Invalid linkage!");
3750 llvm::Constant
*ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty
) {
3751 // We want to operate on the canonical type.
3752 Ty
= Ty
.getCanonicalType();
3754 // Check if we've already emitted an RTTI descriptor for this type.
3755 SmallString
<256> Name
;
3756 llvm::raw_svector_ostream
Out(Name
);
3757 CGM
.getCXXABI().getMangleContext().mangleCXXRTTI(Ty
, Out
);
3759 llvm::GlobalVariable
*OldGV
= CGM
.getModule().getNamedGlobal(Name
);
3760 if (OldGV
&& !OldGV
->isDeclaration()) {
3761 assert(!OldGV
->hasAvailableExternallyLinkage() &&
3762 "available_externally typeinfos not yet implemented");
3767 // Check if there is already an external RTTI descriptor for this type.
3768 if (IsStandardLibraryRTTIDescriptor(Ty
) ||
3769 ShouldUseExternalRTTIDescriptor(CGM
, Ty
))
3770 return GetAddrOfExternalRTTIDescriptor(Ty
);
3772 // Emit the standard library with external linkage.
3773 llvm::GlobalVariable::LinkageTypes Linkage
= getTypeInfoLinkage(CGM
, Ty
);
3775 // Give the type_info object and name the formal visibility of the
3777 llvm::GlobalValue::VisibilityTypes llvmVisibility
;
3778 if (llvm::GlobalValue::isLocalLinkage(Linkage
))
3779 // If the linkage is local, only default visibility makes sense.
3780 llvmVisibility
= llvm::GlobalValue::DefaultVisibility
;
3781 else if (CXXABI
.classifyRTTIUniqueness(Ty
, Linkage
) ==
3782 ItaniumCXXABI::RUK_NonUniqueHidden
)
3783 llvmVisibility
= llvm::GlobalValue::HiddenVisibility
;
3785 llvmVisibility
= CodeGenModule::GetLLVMVisibility(Ty
->getVisibility());
3787 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass
=
3788 llvm::GlobalValue::DefaultStorageClass
;
3789 if (auto RD
= Ty
->getAsCXXRecordDecl()) {
3790 if ((CGM
.getTriple().isWindowsItaniumEnvironment() &&
3791 RD
->hasAttr
<DLLExportAttr
>()) ||
3792 (CGM
.shouldMapVisibilityToDLLExport(RD
) &&
3793 !llvm::GlobalValue::isLocalLinkage(Linkage
) &&
3794 llvmVisibility
== llvm::GlobalValue::DefaultVisibility
))
3795 DLLStorageClass
= llvm::GlobalValue::DLLExportStorageClass
;
3797 return BuildTypeInfo(Ty
, Linkage
, llvmVisibility
, DLLStorageClass
);
3800 llvm::Constant
*ItaniumRTTIBuilder::BuildTypeInfo(
3802 llvm::GlobalVariable::LinkageTypes Linkage
,
3803 llvm::GlobalValue::VisibilityTypes Visibility
,
3804 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass
) {
3805 // Add the vtable pointer.
3806 BuildVTablePointer(cast
<Type
>(Ty
));
3809 llvm::GlobalVariable
*TypeName
= GetAddrOfTypeName(Ty
, Linkage
);
3810 llvm::Constant
*TypeNameField
;
3812 // If we're supposed to demote the visibility, be sure to set a flag
3813 // to use a string comparison for type_info comparisons.
3814 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness
=
3815 CXXABI
.classifyRTTIUniqueness(Ty
, Linkage
);
3816 if (RTTIUniqueness
!= ItaniumCXXABI::RUK_Unique
) {
3817 // The flag is the sign bit, which on ARM64 is defined to be clear
3818 // for global pointers. This is very ARM64-specific.
3819 TypeNameField
= llvm::ConstantExpr::getPtrToInt(TypeName
, CGM
.Int64Ty
);
3820 llvm::Constant
*flag
=
3821 llvm::ConstantInt::get(CGM
.Int64Ty
, ((uint64_t)1) << 63);
3822 TypeNameField
= llvm::ConstantExpr::getAdd(TypeNameField
, flag
);
3824 llvm::ConstantExpr::getIntToPtr(TypeNameField
, CGM
.GlobalsInt8PtrTy
);
3826 TypeNameField
= TypeName
;
3828 Fields
.push_back(TypeNameField
);
3830 switch (Ty
->getTypeClass()) {
3831 #define TYPE(Class, Base)
3832 #define ABSTRACT_TYPE(Class, Base)
3833 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3834 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3835 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3836 #include "clang/AST/TypeNodes.inc"
3837 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3839 // GCC treats vector types as fundamental types.
3842 case Type::ExtVector
:
3843 case Type::ConstantMatrix
:
3845 case Type::BlockPointer
:
3846 // Itanium C++ ABI 2.9.5p4:
3847 // abi::__fundamental_type_info adds no data members to std::type_info.
3850 case Type::LValueReference
:
3851 case Type::RValueReference
:
3852 llvm_unreachable("References shouldn't get here");
3855 case Type::DeducedTemplateSpecialization
:
3856 llvm_unreachable("Undeduced type shouldn't get here");
3864 case Type::ConstantArray
:
3865 case Type::IncompleteArray
:
3866 case Type::VariableArray
:
3867 // Itanium C++ ABI 2.9.5p5:
3868 // abi::__array_type_info adds no data members to std::type_info.
3871 case Type::FunctionNoProto
:
3872 case Type::FunctionProto
:
3873 // Itanium C++ ABI 2.9.5p5:
3874 // abi::__function_type_info adds no data members to std::type_info.
3878 // Itanium C++ ABI 2.9.5p5:
3879 // abi::__enum_type_info adds no data members to std::type_info.
3882 case Type::Record
: {
3883 const CXXRecordDecl
*RD
=
3884 cast
<CXXRecordDecl
>(cast
<RecordType
>(Ty
)->getDecl());
3885 if (!RD
->hasDefinition() || !RD
->getNumBases()) {
3886 // We don't need to emit any fields.
3890 if (CanUseSingleInheritance(RD
))
3891 BuildSIClassTypeInfo(RD
);
3893 BuildVMIClassTypeInfo(RD
);
3898 case Type::ObjCObject
:
3899 case Type::ObjCInterface
:
3900 BuildObjCObjectTypeInfo(cast
<ObjCObjectType
>(Ty
));
3903 case Type::ObjCObjectPointer
:
3904 BuildPointerTypeInfo(cast
<ObjCObjectPointerType
>(Ty
)->getPointeeType());
3908 BuildPointerTypeInfo(cast
<PointerType
>(Ty
)->getPointeeType());
3911 case Type::MemberPointer
:
3912 BuildPointerToMemberTypeInfo(cast
<MemberPointerType
>(Ty
));
3916 // No fields, at least for the moment.
3920 llvm::Constant
*Init
= llvm::ConstantStruct::getAnon(Fields
);
3922 SmallString
<256> Name
;
3923 llvm::raw_svector_ostream
Out(Name
);
3924 CGM
.getCXXABI().getMangleContext().mangleCXXRTTI(Ty
, Out
);
3925 llvm::Module
&M
= CGM
.getModule();
3926 llvm::GlobalVariable
*OldGV
= M
.getNamedGlobal(Name
);
3927 llvm::GlobalVariable
*GV
=
3928 new llvm::GlobalVariable(M
, Init
->getType(),
3929 /*isConstant=*/true, Linkage
, Init
, Name
);
3931 // Export the typeinfo in the same circumstances as the vtable is exported.
3932 auto GVDLLStorageClass
= DLLStorageClass
;
3933 if (CGM
.getTarget().hasPS4DLLImportExport()) {
3934 if (const RecordType
*RecordTy
= dyn_cast
<RecordType
>(Ty
)) {
3935 const CXXRecordDecl
*RD
= cast
<CXXRecordDecl
>(RecordTy
->getDecl());
3936 if (RD
->hasAttr
<DLLExportAttr
>() ||
3937 CXXRecordAllNonInlineVirtualsHaveAttr
<DLLExportAttr
>(RD
)) {
3938 GVDLLStorageClass
= llvm::GlobalVariable::DLLExportStorageClass
;
3943 // If there's already an old global variable, replace it with the new one.
3945 GV
->takeName(OldGV
);
3946 llvm::Constant
*NewPtr
=
3947 llvm::ConstantExpr::getBitCast(GV
, OldGV
->getType());
3948 OldGV
->replaceAllUsesWith(NewPtr
);
3949 OldGV
->eraseFromParent();
3952 if (CGM
.supportsCOMDAT() && GV
->isWeakForLinker())
3953 GV
->setComdat(M
.getOrInsertComdat(GV
->getName()));
3955 CharUnits Align
= CGM
.getContext().toCharUnitsFromBits(
3956 CGM
.getTarget().getPointerAlign(CGM
.GetGlobalVarAddressSpace(nullptr)));
3957 GV
->setAlignment(Align
.getAsAlign());
3959 // The Itanium ABI specifies that type_info objects must be globally
3960 // unique, with one exception: if the type is an incomplete class
3961 // type or a (possibly indirect) pointer to one. That exception
3962 // affects the general case of comparing type_info objects produced
3963 // by the typeid operator, which is why the comparison operators on
3964 // std::type_info generally use the type_info name pointers instead
3965 // of the object addresses. However, the language's built-in uses
3966 // of RTTI generally require class types to be complete, even when
3967 // manipulating pointers to those class types. This allows the
3968 // implementation of dynamic_cast to rely on address equality tests,
3969 // which is much faster.
3971 // All of this is to say that it's important that both the type_info
3972 // object and the type_info name be uniqued when weakly emitted.
3974 TypeName
->setVisibility(Visibility
);
3975 CGM
.setDSOLocal(TypeName
);
3977 GV
->setVisibility(Visibility
);
3978 CGM
.setDSOLocal(GV
);
3980 TypeName
->setDLLStorageClass(DLLStorageClass
);
3981 GV
->setDLLStorageClass(CGM
.getTarget().hasPS4DLLImportExport()
3985 TypeName
->setPartition(CGM
.getCodeGenOpts().SymbolPartition
);
3986 GV
->setPartition(CGM
.getCodeGenOpts().SymbolPartition
);
3991 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3992 /// for the given Objective-C object type.
3993 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType
*OT
) {
3995 const Type
*T
= OT
->getBaseType().getTypePtr();
3996 assert(isa
<BuiltinType
>(T
) || isa
<ObjCInterfaceType
>(T
));
3998 // The builtin types are abi::__class_type_infos and don't require
4000 if (isa
<BuiltinType
>(T
)) return;
4002 ObjCInterfaceDecl
*Class
= cast
<ObjCInterfaceType
>(T
)->getDecl();
4003 ObjCInterfaceDecl
*Super
= Class
->getSuperClass();
4005 // Root classes are also __class_type_info.
4008 QualType SuperTy
= CGM
.getContext().getObjCInterfaceType(Super
);
4010 // Everything else is single inheritance.
4011 llvm::Constant
*BaseTypeInfo
=
4012 ItaniumRTTIBuilder(CXXABI
).BuildTypeInfo(SuperTy
);
4013 Fields
.push_back(BaseTypeInfo
);
4016 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4017 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
4018 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl
*RD
) {
4019 // Itanium C++ ABI 2.9.5p6b:
4020 // It adds to abi::__class_type_info a single member pointing to the
4021 // type_info structure for the base type,
4022 llvm::Constant
*BaseTypeInfo
=
4023 ItaniumRTTIBuilder(CXXABI
).BuildTypeInfo(RD
->bases_begin()->getType());
4024 Fields
.push_back(BaseTypeInfo
);
4028 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
4029 /// a class hierarchy.
4031 llvm::SmallPtrSet
<const CXXRecordDecl
*, 16> NonVirtualBases
;
4032 llvm::SmallPtrSet
<const CXXRecordDecl
*, 16> VirtualBases
;
4036 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
4037 /// abi::__vmi_class_type_info.
4039 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier
*Base
,
4045 cast
<CXXRecordDecl
>(Base
->getType()->castAs
<RecordType
>()->getDecl());
4047 if (Base
->isVirtual()) {
4048 // Mark the virtual base as seen.
4049 if (!Bases
.VirtualBases
.insert(BaseDecl
).second
) {
4050 // If this virtual base has been seen before, then the class is diamond
4052 Flags
|= ItaniumRTTIBuilder::VMI_DiamondShaped
;
4054 if (Bases
.NonVirtualBases
.count(BaseDecl
))
4055 Flags
|= ItaniumRTTIBuilder::VMI_NonDiamondRepeat
;
4058 // Mark the non-virtual base as seen.
4059 if (!Bases
.NonVirtualBases
.insert(BaseDecl
).second
) {
4060 // If this non-virtual base has been seen before, then the class has non-
4061 // diamond shaped repeated inheritance.
4062 Flags
|= ItaniumRTTIBuilder::VMI_NonDiamondRepeat
;
4064 if (Bases
.VirtualBases
.count(BaseDecl
))
4065 Flags
|= ItaniumRTTIBuilder::VMI_NonDiamondRepeat
;
4070 for (const auto &I
: BaseDecl
->bases())
4071 Flags
|= ComputeVMIClassTypeInfoFlags(&I
, Bases
);
4076 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl
*RD
) {
4081 for (const auto &I
: RD
->bases())
4082 Flags
|= ComputeVMIClassTypeInfoFlags(&I
, Bases
);
4087 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4088 /// classes with bases that do not satisfy the abi::__si_class_type_info
4089 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
4090 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl
*RD
) {
4091 llvm::Type
*UnsignedIntLTy
=
4092 CGM
.getTypes().ConvertType(CGM
.getContext().UnsignedIntTy
);
4094 // Itanium C++ ABI 2.9.5p6c:
4095 // __flags is a word with flags describing details about the class
4096 // structure, which may be referenced by using the __flags_masks
4097 // enumeration. These flags refer to both direct and indirect bases.
4098 unsigned Flags
= ComputeVMIClassTypeInfoFlags(RD
);
4099 Fields
.push_back(llvm::ConstantInt::get(UnsignedIntLTy
, Flags
));
4101 // Itanium C++ ABI 2.9.5p6c:
4102 // __base_count is a word with the number of direct proper base class
4103 // descriptions that follow.
4104 Fields
.push_back(llvm::ConstantInt::get(UnsignedIntLTy
, RD
->getNumBases()));
4106 if (!RD
->getNumBases())
4109 // Now add the base class descriptions.
4111 // Itanium C++ ABI 2.9.5p6c:
4112 // __base_info[] is an array of base class descriptions -- one for every
4113 // direct proper base. Each description is of the type:
4115 // struct abi::__base_class_type_info {
4117 // const __class_type_info *__base_type;
4118 // long __offset_flags;
4120 // enum __offset_flags_masks {
4121 // __virtual_mask = 0x1,
4122 // __public_mask = 0x2,
4123 // __offset_shift = 8
4127 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4128 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4130 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4132 QualType OffsetFlagsTy
= CGM
.getContext().LongTy
;
4133 const TargetInfo
&TI
= CGM
.getContext().getTargetInfo();
4134 if (TI
.getTriple().isOSCygMing() &&
4135 TI
.getPointerWidth(LangAS::Default
) > TI
.getLongWidth())
4136 OffsetFlagsTy
= CGM
.getContext().LongLongTy
;
4137 llvm::Type
*OffsetFlagsLTy
=
4138 CGM
.getTypes().ConvertType(OffsetFlagsTy
);
4140 for (const auto &Base
: RD
->bases()) {
4141 // The __base_type member points to the RTTI for the base type.
4142 Fields
.push_back(ItaniumRTTIBuilder(CXXABI
).BuildTypeInfo(Base
.getType()));
4145 cast
<CXXRecordDecl
>(Base
.getType()->castAs
<RecordType
>()->getDecl());
4147 int64_t OffsetFlags
= 0;
4149 // All but the lower 8 bits of __offset_flags are a signed offset.
4150 // For a non-virtual base, this is the offset in the object of the base
4151 // subobject. For a virtual base, this is the offset in the virtual table of
4152 // the virtual base offset for the virtual base referenced (negative).
4154 if (Base
.isVirtual())
4156 CGM
.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD
, BaseDecl
);
4158 const ASTRecordLayout
&Layout
= CGM
.getContext().getASTRecordLayout(RD
);
4159 Offset
= Layout
.getBaseClassOffset(BaseDecl
);
4162 OffsetFlags
= uint64_t(Offset
.getQuantity()) << 8;
4164 // The low-order byte of __offset_flags contains flags, as given by the
4165 // masks from the enumeration __offset_flags_masks.
4166 if (Base
.isVirtual())
4167 OffsetFlags
|= BCTI_Virtual
;
4168 if (Base
.getAccessSpecifier() == AS_public
)
4169 OffsetFlags
|= BCTI_Public
;
4171 Fields
.push_back(llvm::ConstantInt::get(OffsetFlagsLTy
, OffsetFlags
));
4175 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4176 /// pieces from \p Type.
4177 static unsigned extractPBaseFlags(ASTContext
&Ctx
, QualType
&Type
) {
4180 if (Type
.isConstQualified())
4181 Flags
|= ItaniumRTTIBuilder::PTI_Const
;
4182 if (Type
.isVolatileQualified())
4183 Flags
|= ItaniumRTTIBuilder::PTI_Volatile
;
4184 if (Type
.isRestrictQualified())
4185 Flags
|= ItaniumRTTIBuilder::PTI_Restrict
;
4186 Type
= Type
.getUnqualifiedType();
4188 // Itanium C++ ABI 2.9.5p7:
4189 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4190 // incomplete class type, the incomplete target type flag is set.
4191 if (ContainsIncompleteClassType(Type
))
4192 Flags
|= ItaniumRTTIBuilder::PTI_Incomplete
;
4194 if (auto *Proto
= Type
->getAs
<FunctionProtoType
>()) {
4195 if (Proto
->isNothrow()) {
4196 Flags
|= ItaniumRTTIBuilder::PTI_Noexcept
;
4197 Type
= Ctx
.getFunctionTypeWithExceptionSpec(Type
, EST_None
);
4204 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4205 /// used for pointer types.
4206 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy
) {
4207 // Itanium C++ ABI 2.9.5p7:
4208 // __flags is a flag word describing the cv-qualification and other
4209 // attributes of the type pointed to
4210 unsigned Flags
= extractPBaseFlags(CGM
.getContext(), PointeeTy
);
4212 llvm::Type
*UnsignedIntLTy
=
4213 CGM
.getTypes().ConvertType(CGM
.getContext().UnsignedIntTy
);
4214 Fields
.push_back(llvm::ConstantInt::get(UnsignedIntLTy
, Flags
));
4216 // Itanium C++ ABI 2.9.5p7:
4217 // __pointee is a pointer to the std::type_info derivation for the
4218 // unqualified type being pointed to.
4219 llvm::Constant
*PointeeTypeInfo
=
4220 ItaniumRTTIBuilder(CXXABI
).BuildTypeInfo(PointeeTy
);
4221 Fields
.push_back(PointeeTypeInfo
);
4224 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4225 /// struct, used for member pointer types.
4227 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType
*Ty
) {
4228 QualType PointeeTy
= Ty
->getPointeeType();
4230 // Itanium C++ ABI 2.9.5p7:
4231 // __flags is a flag word describing the cv-qualification and other
4232 // attributes of the type pointed to.
4233 unsigned Flags
= extractPBaseFlags(CGM
.getContext(), PointeeTy
);
4235 const RecordType
*ClassType
= cast
<RecordType
>(Ty
->getClass());
4236 if (IsIncompleteClassType(ClassType
))
4237 Flags
|= PTI_ContainingClassIncomplete
;
4239 llvm::Type
*UnsignedIntLTy
=
4240 CGM
.getTypes().ConvertType(CGM
.getContext().UnsignedIntTy
);
4241 Fields
.push_back(llvm::ConstantInt::get(UnsignedIntLTy
, Flags
));
4243 // Itanium C++ ABI 2.9.5p7:
4244 // __pointee is a pointer to the std::type_info derivation for the
4245 // unqualified type being pointed to.
4246 llvm::Constant
*PointeeTypeInfo
=
4247 ItaniumRTTIBuilder(CXXABI
).BuildTypeInfo(PointeeTy
);
4248 Fields
.push_back(PointeeTypeInfo
);
4250 // Itanium C++ ABI 2.9.5p9:
4251 // __context is a pointer to an abi::__class_type_info corresponding to the
4252 // class type containing the member pointed to
4253 // (e.g., the "A" in "int A::*").
4255 ItaniumRTTIBuilder(CXXABI
).BuildTypeInfo(QualType(ClassType
, 0)));
4258 llvm::Constant
*ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty
) {
4259 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty
);
4262 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl
*RD
) {
4263 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4264 QualType FundamentalTypes
[] = {
4265 getContext().VoidTy
, getContext().NullPtrTy
,
4266 getContext().BoolTy
, getContext().WCharTy
,
4267 getContext().CharTy
, getContext().UnsignedCharTy
,
4268 getContext().SignedCharTy
, getContext().ShortTy
,
4269 getContext().UnsignedShortTy
, getContext().IntTy
,
4270 getContext().UnsignedIntTy
, getContext().LongTy
,
4271 getContext().UnsignedLongTy
, getContext().LongLongTy
,
4272 getContext().UnsignedLongLongTy
, getContext().Int128Ty
,
4273 getContext().UnsignedInt128Ty
, getContext().HalfTy
,
4274 getContext().FloatTy
, getContext().DoubleTy
,
4275 getContext().LongDoubleTy
, getContext().Float128Ty
,
4276 getContext().Char8Ty
, getContext().Char16Ty
,
4277 getContext().Char32Ty
4279 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass
=
4280 RD
->hasAttr
<DLLExportAttr
>() || CGM
.shouldMapVisibilityToDLLExport(RD
)
4281 ? llvm::GlobalValue::DLLExportStorageClass
4282 : llvm::GlobalValue::DefaultStorageClass
;
4283 llvm::GlobalValue::VisibilityTypes Visibility
=
4284 CodeGenModule::GetLLVMVisibility(RD
->getVisibility());
4285 for (const QualType
&FundamentalType
: FundamentalTypes
) {
4286 QualType PointerType
= getContext().getPointerType(FundamentalType
);
4287 QualType PointerTypeConst
= getContext().getPointerType(
4288 FundamentalType
.withConst());
4289 for (QualType Type
: {FundamentalType
, PointerType
, PointerTypeConst
})
4290 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4291 Type
, llvm::GlobalValue::ExternalLinkage
,
4292 Visibility
, DLLStorageClass
);
4296 /// What sort of uniqueness rules should we use for the RTTI for the
4298 ItaniumCXXABI::RTTIUniquenessKind
ItaniumCXXABI::classifyRTTIUniqueness(
4299 QualType CanTy
, llvm::GlobalValue::LinkageTypes Linkage
) const {
4300 if (shouldRTTIBeUnique())
4303 // It's only necessary for linkonce_odr or weak_odr linkage.
4304 if (Linkage
!= llvm::GlobalValue::LinkOnceODRLinkage
&&
4305 Linkage
!= llvm::GlobalValue::WeakODRLinkage
)
4308 // It's only necessary with default visibility.
4309 if (CanTy
->getVisibility() != DefaultVisibility
)
4312 // If we're not required to publish this symbol, hide it.
4313 if (Linkage
== llvm::GlobalValue::LinkOnceODRLinkage
)
4314 return RUK_NonUniqueHidden
;
4316 // If we're required to publish this symbol, as we might be under an
4317 // explicit instantiation, leave it with default visibility but
4318 // enable string-comparisons.
4319 assert(Linkage
== llvm::GlobalValue::WeakODRLinkage
);
4320 return RUK_NonUniqueVisible
;
// Find out how to codegen the complete destructor and constructor
namespace {
// Emit:   emit a separate definition.
// RAUW:   emit one definition and replace uses of the other variant with it.
// Alias:  emit one definition plus a true alias for the other variant.
// COMDAT: emit both symbols in one COMDAT group (ELF/wasm only).
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
} // end anonymous namespace
4327 static StructorCodegen
getCodegenToUse(CodeGenModule
&CGM
,
4328 const CXXMethodDecl
*MD
) {
4329 if (!CGM
.getCodeGenOpts().CXXCtorDtorAliases
)
4330 return StructorCodegen::Emit
;
4332 // The complete and base structors are not equivalent if there are any virtual
4333 // bases, so emit separate functions.
4334 if (MD
->getParent()->getNumVBases())
4335 return StructorCodegen::Emit
;
4337 GlobalDecl AliasDecl
;
4338 if (const auto *DD
= dyn_cast
<CXXDestructorDecl
>(MD
)) {
4339 AliasDecl
= GlobalDecl(DD
, Dtor_Complete
);
4341 const auto *CD
= cast
<CXXConstructorDecl
>(MD
);
4342 AliasDecl
= GlobalDecl(CD
, Ctor_Complete
);
4344 llvm::GlobalValue::LinkageTypes Linkage
= CGM
.getFunctionLinkage(AliasDecl
);
4346 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage
))
4347 return StructorCodegen::RAUW
;
4349 // FIXME: Should we allow available_externally aliases?
4350 if (!llvm::GlobalAlias::isValidLinkage(Linkage
))
4351 return StructorCodegen::RAUW
;
4353 if (llvm::GlobalValue::isWeakForLinker(Linkage
)) {
4354 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4355 if (CGM
.getTarget().getTriple().isOSBinFormatELF() ||
4356 CGM
.getTarget().getTriple().isOSBinFormatWasm())
4357 return StructorCodegen::COMDAT
;
4358 return StructorCodegen::Emit
;
4361 return StructorCodegen::Alias
;
4364 static void emitConstructorDestructorAlias(CodeGenModule
&CGM
,
4365 GlobalDecl AliasDecl
,
4366 GlobalDecl TargetDecl
) {
4367 llvm::GlobalValue::LinkageTypes Linkage
= CGM
.getFunctionLinkage(AliasDecl
);
4369 StringRef MangledName
= CGM
.getMangledName(AliasDecl
);
4370 llvm::GlobalValue
*Entry
= CGM
.GetGlobalValue(MangledName
);
4371 if (Entry
&& !Entry
->isDeclaration())
4374 auto *Aliasee
= cast
<llvm::GlobalValue
>(CGM
.GetAddrOfGlobal(TargetDecl
));
4376 // Create the alias with no name.
4377 auto *Alias
= llvm::GlobalAlias::create(Linkage
, "", Aliasee
);
4379 // Constructors and destructors are always unnamed_addr.
4380 Alias
->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global
);
4382 // Switch any previous uses to the alias.
4384 assert(Entry
->getType() == Aliasee
->getType() &&
4385 "declaration exists with different type");
4386 Alias
->takeName(Entry
);
4387 Entry
->replaceAllUsesWith(Alias
);
4388 Entry
->eraseFromParent();
4390 Alias
->setName(MangledName
);
4393 // Finally, set up the alias with its proper name and attributes.
4394 CGM
.SetCommonAttributes(AliasDecl
, Alias
);
4397 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD
) {
4398 auto *MD
= cast
<CXXMethodDecl
>(GD
.getDecl());
4399 auto *CD
= dyn_cast
<CXXConstructorDecl
>(MD
);
4400 const CXXDestructorDecl
*DD
= CD
? nullptr : cast
<CXXDestructorDecl
>(MD
);
4402 StructorCodegen CGType
= getCodegenToUse(CGM
, MD
);
4404 if (CD
? GD
.getCtorType() == Ctor_Complete
4405 : GD
.getDtorType() == Dtor_Complete
) {
4406 GlobalDecl BaseDecl
;
4408 BaseDecl
= GD
.getWithCtorType(Ctor_Base
);
4410 BaseDecl
= GD
.getWithDtorType(Dtor_Base
);
4412 if (CGType
== StructorCodegen::Alias
|| CGType
== StructorCodegen::COMDAT
) {
4413 emitConstructorDestructorAlias(CGM
, GD
, BaseDecl
);
4417 if (CGType
== StructorCodegen::RAUW
) {
4418 StringRef MangledName
= CGM
.getMangledName(GD
);
4419 auto *Aliasee
= CGM
.GetAddrOfGlobal(BaseDecl
);
4420 CGM
.addReplacement(MangledName
, Aliasee
);
4425 // The base destructor is equivalent to the base destructor of its
4426 // base class if there is exactly one non-virtual base class with a
4427 // non-trivial destructor, there are no fields with a non-trivial
4428 // destructor, and the body of the destructor is trivial.
4429 if (DD
&& GD
.getDtorType() == Dtor_Base
&&
4430 CGType
!= StructorCodegen::COMDAT
&&
4431 !CGM
.TryEmitBaseDestructorAsAlias(DD
))
4434 // FIXME: The deleting destructor is equivalent to the selected operator
4436 // * either the delete is a destroying operator delete or the destructor
4437 // would be trivial if it weren't virtual,
4438 // * the conversion from the 'this' parameter to the first parameter of the
4439 // destructor is equivalent to a bitcast,
4440 // * the destructor does not have an implicit "this" return, and
4441 // * the operator delete has the same calling convention and IR function type
4442 // as the destructor.
4443 // In such cases we should try to emit the deleting dtor as an alias to the
4444 // selected 'operator delete'.
4446 llvm::Function
*Fn
= CGM
.codegenCXXStructor(GD
);
4448 if (CGType
== StructorCodegen::COMDAT
) {
4449 SmallString
<256> Buffer
;
4450 llvm::raw_svector_ostream
Out(Buffer
);
4452 getMangleContext().mangleCXXDtorComdat(DD
, Out
);
4454 getMangleContext().mangleCXXCtorComdat(CD
, Out
);
4455 llvm::Comdat
*C
= CGM
.getModule().getOrInsertComdat(Out
.str());
4458 CGM
.maybeSetTrivialComdat(*MD
, *Fn
);
4462 static llvm::FunctionCallee
getBeginCatchFn(CodeGenModule
&CGM
) {
4463 // void *__cxa_begin_catch(void*);
4464 llvm::FunctionType
*FTy
= llvm::FunctionType::get(
4465 CGM
.Int8PtrTy
, CGM
.Int8PtrTy
, /*isVarArg=*/false);
4467 return CGM
.CreateRuntimeFunction(FTy
, "__cxa_begin_catch");
4470 static llvm::FunctionCallee
getEndCatchFn(CodeGenModule
&CGM
) {
4471 // void __cxa_end_catch();
4472 llvm::FunctionType
*FTy
=
4473 llvm::FunctionType::get(CGM
.VoidTy
, /*isVarArg=*/false);
4475 return CGM
.CreateRuntimeFunction(FTy
, "__cxa_end_catch");
4478 static llvm::FunctionCallee
getGetExceptionPtrFn(CodeGenModule
&CGM
) {
4479 // void *__cxa_get_exception_ptr(void*);
4480 llvm::FunctionType
*FTy
= llvm::FunctionType::get(
4481 CGM
.Int8PtrTy
, CGM
.Int8PtrTy
, /*isVarArg=*/false);
4483 return CGM
.CreateRuntimeFunction(FTy
, "__cxa_get_exception_ptr");
4487 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4488 /// exception type lets us state definitively that the thrown exception
4489 /// type does not have a destructor. In particular:
4490 /// - Catch-alls tell us nothing, so we have to conservatively
4491 /// assume that the thrown exception might have a destructor.
4492 /// - Catches by reference behave according to their base types.
4493 /// - Catches of non-record types will only trigger for exceptions
4494 /// of non-record types, which never have destructors.
4495 /// - Catches of record types can trigger for arbitrary subclasses
4496 /// of the caught type, so we have to assume the actual thrown
4497 /// exception type might have a throwing destructor, even if the
4498 /// caught type's destructor is trivial or nothrow.
4499 struct CallEndCatch final
: EHScopeStack::Cleanup
{
4500 CallEndCatch(bool MightThrow
) : MightThrow(MightThrow
) {}
4503 void Emit(CodeGenFunction
&CGF
, Flags flags
) override
{
4505 CGF
.EmitNounwindRuntimeCall(getEndCatchFn(CGF
.CGM
));
4509 CGF
.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF
.CGM
));
4514 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4515 /// __cxa_end_catch.
4517 /// \param EndMightThrow - true if __cxa_end_catch might throw
4518 static llvm::Value
*CallBeginCatch(CodeGenFunction
&CGF
,
4520 bool EndMightThrow
) {
4521 llvm::CallInst
*call
=
4522 CGF
.EmitNounwindRuntimeCall(getBeginCatchFn(CGF
.CGM
), Exn
);
4524 CGF
.EHStack
.pushCleanup
<CallEndCatch
>(NormalAndEHCleanup
, EndMightThrow
);
4529 /// A "special initializer" callback for initializing a catch
4530 /// parameter during catch initialization.
4531 static void InitCatchParam(CodeGenFunction
&CGF
,
4532 const VarDecl
&CatchParam
,
4534 SourceLocation Loc
) {
4535 // Load the exception from where the landing pad saved it.
4536 llvm::Value
*Exn
= CGF
.getExceptionFromSlot();
4538 CanQualType CatchType
=
4539 CGF
.CGM
.getContext().getCanonicalType(CatchParam
.getType());
4540 llvm::Type
*LLVMCatchTy
= CGF
.ConvertTypeForMem(CatchType
);
4542 // If we're catching by reference, we can just cast the object
4543 // pointer to the appropriate pointer.
4544 if (isa
<ReferenceType
>(CatchType
)) {
4545 QualType CaughtType
= cast
<ReferenceType
>(CatchType
)->getPointeeType();
4546 bool EndCatchMightThrow
= CaughtType
->isRecordType();
4548 // __cxa_begin_catch returns the adjusted object pointer.
4549 llvm::Value
*AdjustedExn
= CallBeginCatch(CGF
, Exn
, EndCatchMightThrow
);
4551 // We have no way to tell the personality function that we're
4552 // catching by reference, so if we're catching a pointer,
4553 // __cxa_begin_catch will actually return that pointer by value.
4554 if (const PointerType
*PT
= dyn_cast
<PointerType
>(CaughtType
)) {
4555 QualType PointeeType
= PT
->getPointeeType();
4557 // When catching by reference, generally we should just ignore
4558 // this by-value pointer and use the exception object instead.
4559 if (!PointeeType
->isRecordType()) {
4561 // Exn points to the struct _Unwind_Exception header, which
4562 // we have to skip past in order to reach the exception data.
4563 unsigned HeaderSize
=
4564 CGF
.CGM
.getTargetCodeGenInfo().getSizeOfUnwindException();
4566 CGF
.Builder
.CreateConstGEP1_32(CGF
.Int8Ty
, Exn
, HeaderSize
);
4568 // However, if we're catching a pointer-to-record type that won't
4569 // work, because the personality function might have adjusted
4570 // the pointer. There's actually no way for us to fully satisfy
4571 // the language/ABI contract here: we can't use Exn because it
4572 // might have the wrong adjustment, but we can't use the by-value
4573 // pointer because it's off by a level of abstraction.
4575 // The current solution is to dump the adjusted pointer into an
4576 // alloca, which breaks language semantics (because changing the
4577 // pointer doesn't change the exception) but at least works.
4578 // The better solution would be to filter out non-exact matches
4579 // and rethrow them, but this is tricky because the rethrow
4580 // really needs to be catchable by other sites at this landing
4581 // pad. The best solution is to fix the personality function.
4583 // Pull the pointer for the reference type off.
4584 llvm::Type
*PtrTy
= CGF
.ConvertTypeForMem(CaughtType
);
4586 // Create the temporary and write the adjusted pointer into it.
4588 CGF
.CreateTempAlloca(PtrTy
, CGF
.getPointerAlign(), "exn.byref.tmp");
4589 llvm::Value
*Casted
= CGF
.Builder
.CreateBitCast(AdjustedExn
, PtrTy
);
4590 CGF
.Builder
.CreateStore(Casted
, ExnPtrTmp
);
4592 // Bind the reference to the temporary.
4593 AdjustedExn
= ExnPtrTmp
.getPointer();
4597 llvm::Value
*ExnCast
=
4598 CGF
.Builder
.CreateBitCast(AdjustedExn
, LLVMCatchTy
, "exn.byref");
4599 CGF
.Builder
.CreateStore(ExnCast
, ParamAddr
);
4603 // Scalars and complexes.
4604 TypeEvaluationKind TEK
= CGF
.getEvaluationKind(CatchType
);
4605 if (TEK
!= TEK_Aggregate
) {
4606 llvm::Value
*AdjustedExn
= CallBeginCatch(CGF
, Exn
, false);
4608 // If the catch type is a pointer type, __cxa_begin_catch returns
4609 // the pointer by value.
4610 if (CatchType
->hasPointerRepresentation()) {
4611 llvm::Value
*CastExn
=
4612 CGF
.Builder
.CreateBitCast(AdjustedExn
, LLVMCatchTy
, "exn.casted");
4614 switch (CatchType
.getQualifiers().getObjCLifetime()) {
4615 case Qualifiers::OCL_Strong
:
4616 CastExn
= CGF
.EmitARCRetainNonBlock(CastExn
);
4619 case Qualifiers::OCL_None
:
4620 case Qualifiers::OCL_ExplicitNone
:
4621 case Qualifiers::OCL_Autoreleasing
:
4622 CGF
.Builder
.CreateStore(CastExn
, ParamAddr
);
4625 case Qualifiers::OCL_Weak
:
4626 CGF
.EmitARCInitWeak(ParamAddr
, CastExn
);
4629 llvm_unreachable("bad ownership qualifier!");
4632 // Otherwise, it returns a pointer into the exception object.
4634 LValue srcLV
= CGF
.MakeNaturalAlignAddrLValue(AdjustedExn
, CatchType
);
4635 LValue destLV
= CGF
.MakeAddrLValue(ParamAddr
, CatchType
);
4638 CGF
.EmitStoreOfComplex(CGF
.EmitLoadOfComplex(srcLV
, Loc
), destLV
,
4642 llvm::Value
*ExnLoad
= CGF
.EmitLoadOfScalar(srcLV
, Loc
);
4643 CGF
.EmitStoreOfScalar(ExnLoad
, destLV
, /*init*/ true);
4647 llvm_unreachable("evaluation kind filtered out!");
4649 llvm_unreachable("bad evaluation kind");
4652 assert(isa
<RecordType
>(CatchType
) && "unexpected catch type!");
4653 auto catchRD
= CatchType
->getAsCXXRecordDecl();
4654 CharUnits caughtExnAlignment
= CGF
.CGM
.getClassPointerAlignment(catchRD
);
4657 llvm::PointerType::getUnqual(CGF
.getLLVMContext()); // addrspace 0 ok
4659 // Check for a copy expression. If we don't have a copy expression,
4660 // that means a trivial copy is okay.
4661 const Expr
*copyExpr
= CatchParam
.getInit();
4663 llvm::Value
*rawAdjustedExn
= CallBeginCatch(CGF
, Exn
, true);
4664 Address
adjustedExn(CGF
.Builder
.CreateBitCast(rawAdjustedExn
, PtrTy
),
4665 LLVMCatchTy
, caughtExnAlignment
);
4666 LValue Dest
= CGF
.MakeAddrLValue(ParamAddr
, CatchType
);
4667 LValue Src
= CGF
.MakeAddrLValue(adjustedExn
, CatchType
);
4668 CGF
.EmitAggregateCopy(Dest
, Src
, CatchType
, AggValueSlot::DoesNotOverlap
);
4672 // We have to call __cxa_get_exception_ptr to get the adjusted
4673 // pointer before copying.
4674 llvm::CallInst
*rawAdjustedExn
=
4675 CGF
.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF
.CGM
), Exn
);
4677 // Cast that to the appropriate type.
4678 Address
adjustedExn(CGF
.Builder
.CreateBitCast(rawAdjustedExn
, PtrTy
),
4679 LLVMCatchTy
, caughtExnAlignment
);
4681 // The copy expression is defined in terms of an OpaqueValueExpr.
4682 // Find it and map it to the adjusted expression.
4683 CodeGenFunction::OpaqueValueMapping
4684 opaque(CGF
, OpaqueValueExpr::findInCopyConstruct(copyExpr
),
4685 CGF
.MakeAddrLValue(adjustedExn
, CatchParam
.getType()));
4687 // Call the copy ctor in a terminate scope.
4688 CGF
.EHStack
.pushTerminate();
4690 // Perform the copy construction.
4691 CGF
.EmitAggExpr(copyExpr
,
4692 AggValueSlot::forAddr(ParamAddr
, Qualifiers(),
4693 AggValueSlot::IsNotDestructed
,
4694 AggValueSlot::DoesNotNeedGCBarriers
,
4695 AggValueSlot::IsNotAliased
,
4696 AggValueSlot::DoesNotOverlap
));
4698 // Leave the terminate scope.
4699 CGF
.EHStack
.popTerminate();
4701 // Undo the opaque value mapping.
4704 // Finally we can call __cxa_begin_catch.
4705 CallBeginCatch(CGF
, Exn
, true);
4708 /// Begins a catch statement by initializing the catch variable and
4709 /// calling __cxa_begin_catch.
4710 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction
&CGF
,
4711 const CXXCatchStmt
*S
) {
4712 // We have to be very careful with the ordering of cleanups here:
4713 // C++ [except.throw]p4:
4714 // The destruction [of the exception temporary] occurs
4715 // immediately after the destruction of the object declared in
4716 // the exception-declaration in the handler.
4718 // So the precise ordering is:
4719 // 1. Construct catch variable.
4720 // 2. __cxa_begin_catch
4721 // 3. Enter __cxa_end_catch cleanup
4722 // 4. Enter dtor cleanup
4724 // We do this by using a slightly abnormal initialization process.
4725 // Delegation sequence:
4726 // - ExitCXXTryStmt opens a RunCleanupsScope
4727 // - EmitAutoVarAlloca creates the variable and debug info
4728 // - InitCatchParam initializes the variable from the exception
4729 // - CallBeginCatch calls __cxa_begin_catch
4730 // - CallBeginCatch enters the __cxa_end_catch cleanup
4731 // - EmitAutoVarCleanups enters the variable destructor cleanup
4732 // - EmitCXXTryStmt emits the code for the catch body
4733 // - EmitCXXTryStmt close the RunCleanupsScope
4735 VarDecl
*CatchParam
= S
->getExceptionDecl();
4737 llvm::Value
*Exn
= CGF
.getExceptionFromSlot();
4738 CallBeginCatch(CGF
, Exn
, true);
4743 CodeGenFunction::AutoVarEmission var
= CGF
.EmitAutoVarAlloca(*CatchParam
);
4744 InitCatchParam(CGF
, *CatchParam
, var
.getObjectAddress(CGF
), S
->getBeginLoc());
4745 CGF
.EmitAutoVarCleanups(var
);
4748 /// Get or define the following function:
4749 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
4750 /// This code is used only in C++.
4751 static llvm::FunctionCallee
getClangCallTerminateFn(CodeGenModule
&CGM
) {
4752 ASTContext
&C
= CGM
.getContext();
4753 const CGFunctionInfo
&FI
= CGM
.getTypes().arrangeBuiltinFunctionDeclaration(
4754 C
.VoidTy
, {C
.getPointerType(C
.CharTy
)});
4755 llvm::FunctionType
*fnTy
= CGM
.getTypes().GetFunctionType(FI
);
4756 llvm::FunctionCallee fnRef
= CGM
.CreateRuntimeFunction(
4757 fnTy
, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4758 llvm::Function
*fn
=
4759 cast
<llvm::Function
>(fnRef
.getCallee()->stripPointerCasts());
4761 CGM
.SetLLVMFunctionAttributes(GlobalDecl(), FI
, fn
, /*IsThunk=*/false);
4762 CGM
.SetLLVMFunctionAttributesForDefinition(nullptr, fn
);
4763 fn
->setDoesNotThrow();
4764 fn
->setDoesNotReturn();
4766 // What we really want is to massively penalize inlining without
4767 // forbidding it completely. The difference between that and
4768 // 'noinline' is negligible.
4769 fn
->addFnAttr(llvm::Attribute::NoInline
);
4771 // Allow this function to be shared across translation units, but
4772 // we don't want it to turn into an exported symbol.
4773 fn
->setLinkage(llvm::Function::LinkOnceODRLinkage
);
4774 fn
->setVisibility(llvm::Function::HiddenVisibility
);
4775 if (CGM
.supportsCOMDAT())
4776 fn
->setComdat(CGM
.getModule().getOrInsertComdat(fn
->getName()));
4778 // Set up the function.
4779 llvm::BasicBlock
*entry
=
4780 llvm::BasicBlock::Create(CGM
.getLLVMContext(), "", fn
);
4781 CGBuilderTy
builder(CGM
, entry
);
4783 // Pull the exception pointer out of the parameter list.
4784 llvm::Value
*exn
= &*fn
->arg_begin();
4786 // Call __cxa_begin_catch(exn).
4787 llvm::CallInst
*catchCall
= builder
.CreateCall(getBeginCatchFn(CGM
), exn
);
4788 catchCall
->setDoesNotThrow();
4789 catchCall
->setCallingConv(CGM
.getRuntimeCC());
4791 // Call std::terminate().
4792 llvm::CallInst
*termCall
= builder
.CreateCall(CGM
.getTerminateFn());
4793 termCall
->setDoesNotThrow();
4794 termCall
->setDoesNotReturn();
4795 termCall
->setCallingConv(CGM
.getRuntimeCC());
4797 // std::terminate cannot return.
4798 builder
.CreateUnreachable();
4804 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction
&CGF
,
4806 // In C++, we want to call __cxa_begin_catch() before terminating.
4808 assert(CGF
.CGM
.getLangOpts().CPlusPlus
);
4809 return CGF
.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF
.CGM
), Exn
);
4811 return CGF
.EmitNounwindRuntimeCall(CGF
.CGM
.getTerminateFn());
4814 std::pair
<llvm::Value
*, const CXXRecordDecl
*>
4815 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction
&CGF
, Address This
,
4816 const CXXRecordDecl
*RD
) {
4817 return {CGF
.GetVTablePtr(This
, CGM
.Int8PtrTy
, RD
), RD
};
4820 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction
&CGF
,
4821 const CXXCatchStmt
*C
) {
4822 if (CGF
.getTarget().hasFeature("exception-handling"))
4823 CGF
.EHStack
.pushCleanup
<CatchRetScope
>(
4824 NormalCleanup
, cast
<llvm::CatchPadInst
>(CGF
.CurrentFuncletPad
));
4825 ItaniumCXXABI::emitBeginCatch(CGF
, C
);
4829 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction
&CGF
,
4831 // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4832 // the violating exception to mark it handled, but it is currently hard to do
4833 // with wasm EH instruction structure with catch/catch_all, we just call
4834 // std::terminate and ignore the violating exception as in CGCXXABI.
4835 // TODO Consider code transformation that makes calling __clang_call_terminate
4837 return CGCXXABI::emitTerminateForUnexpectedException(CGF
, Exn
);
4840 /// Register a global destructor as best as we know how.
4841 void XLCXXABI::registerGlobalDtor(CodeGenFunction
&CGF
, const VarDecl
&D
,
4842 llvm::FunctionCallee Dtor
,
4843 llvm::Constant
*Addr
) {
4844 if (D
.getTLSKind() != VarDecl::TLS_None
) {
4845 llvm::PointerType
*PtrTy
=
4846 llvm::PointerType::getUnqual(CGF
.getLLVMContext());
4848 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4849 llvm::FunctionType
*AtExitTy
=
4850 llvm::FunctionType::get(CGM
.IntTy
, {CGM
.IntTy
, PtrTy
}, true);
4852 // Fetch the actual function.
4853 llvm::FunctionCallee AtExit
=
4854 CGM
.CreateRuntimeFunction(AtExitTy
, "__pt_atexit_np");
4856 // Create __dtor function for the var decl.
4857 llvm::Function
*DtorStub
= CGF
.createTLSAtExitStub(D
, Dtor
, Addr
, AtExit
);
4859 // Register above __dtor with atexit().
4860 // First param is flags and must be 0, second param is function ptr
4861 llvm::Value
*NV
= llvm::Constant::getNullValue(CGM
.IntTy
);
4862 CGF
.EmitNounwindRuntimeCall(AtExit
, {NV
, DtorStub
});
4864 // Cannot unregister TLS __dtor so done
4868 // Create __dtor function for the var decl.
4869 llvm::Function
*DtorStub
= CGF
.createAtExitStub(D
, Dtor
, Addr
);
4871 // Register above __dtor with atexit().
4872 CGF
.registerGlobalDtorWithAtExit(DtorStub
);
4874 // Emit __finalize function to unregister __dtor and (as appropriate) call
4876 emitCXXStermFinalizer(D
, DtorStub
, Addr
);
4879 void XLCXXABI::emitCXXStermFinalizer(const VarDecl
&D
, llvm::Function
*dtorStub
,
4880 llvm::Constant
*addr
) {
4881 llvm::FunctionType
*FTy
= llvm::FunctionType::get(CGM
.VoidTy
, false);
4882 SmallString
<256> FnName
;
4884 llvm::raw_svector_ostream
Out(FnName
);
4885 getMangleContext().mangleDynamicStermFinalizer(&D
, Out
);
4888 // Create the finalization action associated with a variable.
4889 const CGFunctionInfo
&FI
= CGM
.getTypes().arrangeNullaryFunction();
4890 llvm::Function
*StermFinalizer
= CGM
.CreateGlobalInitOrCleanUpFunction(
4891 FTy
, FnName
.str(), FI
, D
.getLocation());
4893 CodeGenFunction
CGF(CGM
);
4895 CGF
.StartFunction(GlobalDecl(), CGM
.getContext().VoidTy
, StermFinalizer
, FI
,
4896 FunctionArgList(), D
.getLocation(),
4897 D
.getInit()->getExprLoc());
4899 // The unatexit subroutine unregisters __dtor functions that were previously
4900 // registered by the atexit subroutine. If the referenced function is found,
4901 // the unatexit returns a value of 0, meaning that the cleanup is still
4902 // pending (and we should call the __dtor function).
4903 llvm::Value
*V
= CGF
.unregisterGlobalDtorWithUnAtExit(dtorStub
);
4905 llvm::Value
*NeedsDestruct
= CGF
.Builder
.CreateIsNull(V
, "needs_destruct");
4907 llvm::BasicBlock
*DestructCallBlock
= CGF
.createBasicBlock("destruct.call");
4908 llvm::BasicBlock
*EndBlock
= CGF
.createBasicBlock("destruct.end");
4910 // Check if unatexit returns a value of 0. If it does, jump to
4911 // DestructCallBlock, otherwise jump to EndBlock directly.
4912 CGF
.Builder
.CreateCondBr(NeedsDestruct
, DestructCallBlock
, EndBlock
);
4914 CGF
.EmitBlock(DestructCallBlock
);
4916 // Emit the call to dtorStub.
4917 llvm::CallInst
*CI
= CGF
.Builder
.CreateCall(dtorStub
);
4919 // Make sure the call and the callee agree on calling convention.
4920 CI
->setCallingConv(dtorStub
->getCallingConv());
4922 CGF
.EmitBlock(EndBlock
);
4924 CGF
.FinishFunction();
4926 if (auto *IPA
= D
.getAttr
<InitPriorityAttr
>()) {
4927 CGM
.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer
,
4928 IPA
->getPriority());
4929 } else if (isTemplateInstantiation(D
.getTemplateSpecializationKind()) ||
4930 getContext().GetGVALinkageForVariable(&D
) == GVA_DiscardableODR
) {
4931 // According to C++ [basic.start.init]p2, class template static data
4932 // members (i.e., implicitly or explicitly instantiated specializations)
4933 // have unordered initialization. As a consequence, we can put them into
4934 // their own llvm.global_dtors entry.
4935 CGM
.AddCXXStermFinalizerToGlobalDtor(StermFinalizer
, 65535);
4937 CGM
.AddCXXStermFinalizerEntry(StermFinalizer
);