//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI. The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//   https://itanium-cxx-abi.github.io/cxx-abi/abi.html
//   https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
//   https://developer.arm.com/documentation/ihi0041/g/
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"

#include <optional>

using namespace clang;
using namespace CodeGen;

namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
  /// VTables - All the vtables which have been defined.
  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;

  /// All the thread wrapper functions that have been used.
  llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
      ThreadWrappers;

protected:
  bool UseARMMethodPtrABI;
  bool UseARMGuardVarABI;
  bool Use32BitVTableOffsetABI;

  ItaniumMangleContext &getMangleContext() {
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
  }

public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
                bool UseARMMethodPtrABI = false,
                bool UseARMGuardVarABI = false) :
    CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
    UseARMGuardVarABI(UseARMGuardVarABI),
    Use32BitVTableOffsetABI(false) { }

  bool classifyReturnType(CGFunctionInfo &FI) const override;

  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
    // If C++ prohibits us from making a copy, pass by address.
    if (!RD->canPassInRegisters())
      return RAA_Indirect;
    return RAA_Default;
  }

  bool isThisCompleteObject(GlobalDecl GD) const override {
    // The Itanium ABI has separate complete-object vs. base-object
    // variants of both constructors and destructors.
    if (isa<CXXDestructorDecl>(GD.getDecl())) {
      switch (GD.getDtorType()) {
      case Dtor_Complete:
      case Dtor_Deleting:
        return true;

      case Dtor_Base:
        return false;

      case Dtor_Comdat:
        llvm_unreachable("emitting dtor comdat as function?");
      }
      llvm_unreachable("bad dtor kind");
    }
    if (isa<CXXConstructorDecl>(GD.getDecl())) {
      switch (GD.getCtorType()) {
      case Ctor_Complete:
        return true;

      case Ctor_Base:
        return false;

      case Ctor_CopyingClosure:
      case Ctor_DefaultClosure:
        llvm_unreachable("closure ctors in Itanium ABI?");

      case Ctor_Comdat:
        llvm_unreachable("emitting ctor comdat as function?");
      }
      llvm_unreachable("bad ctor kind");
    }

    // No other kinds.
    return false;
  }
  bool isZeroInitializable(const MemberPointerType *MPT) override;

  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;

  CGCallee
  EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
                                  const Expr *E,
                                  Address This,
                                  llvm::Value *&ThisPtrForCall,
                                  llvm::Value *MemFnPtr,
                                  const MemberPointerType *MPT) override;

  llvm::Value *
  EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
                               Address Base,
                               llvm::Value *MemPtr,
                               const MemberPointerType *MPT) override;

  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *Src) override;
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
                                              llvm::Constant *Src) override;

  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;

  llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
                                        CharUnits offset) override;
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
                                     CharUnits ThisAdjustment);

  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L, llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) override;

  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *Addr,
                                          const MemberPointerType *MPT) override;

  void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
                               Address Ptr, QualType ElementType,
                               const CXXDestructorDecl *Dtor) override;

  void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
  void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;

  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

  void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
  llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
  CatchTypeInfo
  getAddrOfCXXCatchHandlerType(QualType Ty,
                               QualType CatchHandlerType) override {
    return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
  }

  bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
  void EmitBadTypeidCall(CodeGenFunction &CGF) override;
  llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
                          Address ThisPtr,
                          llvm::Type *StdTypeInfoPtrTy) override;

  bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                          QualType SrcRecordTy) override;
  /// Determine whether we know that all instances of type RecordTy will have
  /// the same vtable pointer value, one that is distinct from all other
  /// vtable pointers. While this is required by the Itanium ABI, it doesn't
  /// happen in practice in some cases due to language extensions.
  bool hasUniqueVTablePointer(QualType RecordTy) {
    const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();

    // Under -fapple-kext, multiple definitions of the same vtable may be
    // emitted.
    if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
        getContext().getLangOpts().AppleKext)
      return false;

    // If the type_info* would be null, the vtable might be merged with that of
    // another type.
    if (!CGM.shouldEmitRTTI())
      return false;

    // If there's only one definition of the vtable in the program, it has a
    // unique address.
    if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
      return true;

    // Even if there are multiple definitions of the vtable, they are required
    // by the ABI to use the same symbol name, so should be merged at load
    // time. However, if the class has hidden visibility, there can be
    // different versions of the class in different modules, and the ABI
    // library might treat them as being the same.
    if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
        llvm::GlobalValue::DefaultVisibility)
      return false;

    return true;
  }

  bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
    return hasUniqueVTablePointer(DestRecordTy);
  }

  llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
                                   QualType SrcRecordTy, QualType DestTy,
                                   QualType DestRecordTy,
                                   llvm::BasicBlock *CastEnd) override;

  llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
                                    QualType SrcRecordTy, QualType DestTy,
                                    QualType DestRecordTy,
                                    llvm::BasicBlock *CastSuccess,
                                    llvm::BasicBlock *CastFail) override;

  llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
                                     QualType SrcRecordTy) override;

  bool EmitBadCastCall(CodeGenFunction &CGF) override;

  llvm::Value *
  GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
                            const CXXRecordDecl *ClassDecl,
                            const CXXRecordDecl *BaseClassDecl) override;

  void EmitCXXConstructors(const CXXConstructorDecl *D) override;

  AddedStructorArgCounts
  buildStructorSignature(GlobalDecl GD,
                         SmallVectorImpl<CanQualType> &ArgTys) override;

  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
                              CXXDtorType DT) const override {
    // Itanium does not emit any destructor variant as an inline thunk.
    // Delegating may occur as an optimization, but all variants are either
    // emitted with external linkage or as linkonce if they are inline and used.
    return false;
  }

  void EmitCXXDestructors(const CXXDestructorDecl *D) override;

  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
                                 FunctionArgList &Params) override;

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;

  AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
                                               const CXXConstructorDecl *D,
                                               CXXCtorType Type,
                                               bool ForVirtualBase,
                                               bool Delegating) override;

  llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
                                             const CXXDestructorDecl *DD,
                                             CXXDtorType Type,
                                             bool ForVirtualBase,
                                             bool Delegating) override;

  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
                          CXXDtorType Type, bool ForVirtualBase,
                          bool Delegating, Address This,
                          QualType ThisTy) override;

  void emitVTableDefinitions(CodeGenVTables &CGVT,
                             const CXXRecordDecl *RD) override;

  bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
                                           CodeGenFunction::VPtr Vptr) override;

  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
    return true;
  }

  llvm::Constant *
  getVTableAddressPoint(BaseSubobject Base,
                        const CXXRecordDecl *VTableClass) override;

  llvm::Value *getVTableAddressPointInStructor(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;

  llvm::Value *getVTableAddressPointInStructorWithVTT(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase);

  llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
                                        CharUnits VPtrOffset) override;

  CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
                                     Address This, llvm::Type *Ty,
                                     SourceLocation Loc) override;

  llvm::Value *
  EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
                            CXXDtorType DtorType, Address This,
                            DeleteOrMemberCallExpr E,
                            llvm::CallBase **CallOrInvoke) override;

  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;

  bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
  bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;

  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
                       bool ReturnAdjustment) override {
    // Allow inlining of thunks by emitting them with available_externally
    // linkage together with vtables when needed.
    if (ForVTable && !Thunk->hasLocalLinkage())
      Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
    CGM.setGVProperties(Thunk, GD);
  }

  bool exportThunk() override { return true; }

  llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const CXXRecordDecl *UnadjustedThisClass,
                                     const ThunkInfo &TI) override;

  llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const CXXRecordDecl *UnadjustedRetClass,
                                       const ReturnAdjustment &RA) override;

  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
                              FunctionArgList &Args) const override {
    assert(!Args.empty() && "expected the arglist to not be empty!");
    return Args.size() - 1;
  }

  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() override
    { return "__cxa_deleted_virtual"; }

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                   Address allocPtr,
                                   CharUnits cookieSize) override;

  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                       llvm::GlobalVariable *DeclPtr,
                       bool PerformInit) override;
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                                llvm::Value *Val);
  void EmitThreadLocalInitFuncs(
      CodeGenModule &CGM,
      ArrayRef<const VarDecl *> CXXThreadLocals,
      ArrayRef<llvm::Function *> CXXThreadLocalInits,
      ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;

  bool usesThreadWrapperFunction(const VarDecl *VD) const override {
    return !isEmittedWithConstantInitializer(VD) ||
           mayNeedDestruction(VD);
  }
  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
                                      QualType LValType) override;

  bool NeedsVTTParameter(GlobalDecl GD) override;

  llvm::Constant *
  getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD);

  /**************************** RTTI Uniqueness ******************************/

protected:
  /// Returns true if the ABI requires RTTI type_info objects to be unique
  /// across a program.
  virtual bool shouldRTTIBeUnique() const { return true; }

public:
  /// What sort of unique-RTTI behavior should we use?
  enum RTTIUniquenessKind {
    /// We are guaranteeing, or need to guarantee, that the RTTI string
    /// is unique.
    RUK_Unique,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// can demote to hidden visibility but must use string comparisons.
    RUK_NonUniqueHidden,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// have to use string comparisons, but we also have to emit it with
    /// non-hidden visibility.
    RUK_NonUniqueVisible
  };

  /// Return the required visibility status for the given type and linkage in
  /// the current ABI.
  RTTIUniquenessKind
  classifyRTTIUniqueness(QualType CanTy,
                         llvm::GlobalValue::LinkageTypes Linkage) const;
  friend class ItaniumRTTIBuilder;

  void emitCXXStructor(GlobalDecl GD) override;

  std::pair<llvm::Value *, const CXXRecordDecl *>
  LoadVTablePtr(CodeGenFunction &CGF, Address This,
                const CXXRecordDecl *RD) override;

private:
  llvm::Constant *
  getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD);

  bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      // Skip empty slot.
      if (!VtableComponent.isUsedFunctionPointerKind())
        continue;

      const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
      const FunctionDecl *FD = Method->getDefinition();
      const bool IsInlined =
          Method->getCanonicalDecl()->isInlined() || (FD && FD->isInlined());
      if (!IsInlined)
        continue;

      StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
      auto *Entry = CGM.GetGlobalValue(Name);
      // This checks whether the virtual inline function has already been
      // emitted. Note that it is possible for this inline function to be
      // emitted only after we try to emit the vtable speculatively; because
      // of this, we do an extra pass after emitting all deferred vtables to
      // find and emit these vtables opportunistically.
      if (!Entry || Entry->isDeclaration())
        return true;
    }
    return false;
  }
  bool isVTableHidden(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      if (VtableComponent.isRTTIKind()) {
        const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
        if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
          return true;
      } else if (VtableComponent.isUsedFunctionPointerKind()) {
        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
        if (Method->getVisibility() == Visibility::HiddenVisibility &&
            !Method->isDefined())
          return true;
      }
    }
    return false;
  }
};

class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  bool constructorsAndDestructorsReturnThis() const override { return true; }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};

class AppleARM64CXXABI : public ARMCXXABI {
public:
  AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};

class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
};

class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

private:
  bool constructorsAndDestructorsReturnThis() const override { return true; }
  bool canCallMismatchedFunctionType() const override { return false; }
};

class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  bool useSinitAndSterm() const override { return true; }

private:
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
}

CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
  switch (CGM.getContext().getCXXABIKind()) {
  // For IR-generation purposes, there's no significant difference
  // between the ARM and iOS ABIs.
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
    return new ARMCXXABI(CGM);

  case TargetCXXABI::AppleARM64:
    return new AppleARM64CXXABI(CGM);

  case TargetCXXABI::Fuchsia:
    return new FuchsiaCXXABI(CGM);

  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
  // include the other 32-bit ARM oddities: constructor/destructor return values
  // and array cookies.
  case TargetCXXABI::GenericAArch64:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                             /*UseARMGuardVarABI=*/true);

  case TargetCXXABI::GenericMIPS:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);

  case TargetCXXABI::WebAssembly:
    return new WebAssemblyCXXABI(CGM);

  case TargetCXXABI::XL:
    return new XLCXXABI(CGM);

  case TargetCXXABI::GenericItanium:
    return new ItaniumCXXABI(CGM);

  case TargetCXXABI::Microsoft:
    llvm_unreachable("Microsoft ABI is not Itanium-based");
  }
  llvm_unreachable("bad ABI kind");
}

llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  if (MPT->isMemberDataPointer())
    return CGM.PtrDiffTy;
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
}

/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
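///
/// For example (an illustrative sketch, assuming 8-byte pointers and that
/// 'g' lands in vtable slot 0):
///   struct A { void f(); virtual void g(); };
/// Under Itanium, '&A::f' is { (ptrdiff_t)&A::f, 0 } and '&A::g' is
/// { 1, 0 } (vtable byte offset 0, plus 1 for the virtual flag); under the
/// ARM ABI, '&A::g' is instead { 0, 1 }, with the flag carried in 'adj'.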
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
    CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
    llvm::Value *&ThisPtrForCall,
    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);

  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");

  // Extract memptr.adj, which is in the second field.
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");

  // Compute the true adjustment.
  llvm::Value *Adj = RawAdj;
  if (UseARMMethodPtrABI)
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");

  // Apply the adjustment and cast back to the original struct type
  // for consistency.
  llvm::Value *This = ThisAddr.emitRawPointer(CGF);
  This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
  ThisPtrForCall = This;

  // Load the function pointer.
  llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");

  // If the LSB in the function pointer is 1, the function pointer points to
  // a virtual function.
  llvm::Value *IsVirtual;
  if (UseARMMethodPtrABI)
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
  else
    IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);

  // In the virtual path, the adjustment left 'This' pointing to the
  // vtable of the correct base subobject. The "function pointer" is an
  // offset within the vtable (+1 for the virtual flag on non-ARM).
  CGF.EmitBlock(FnVirtual);

  // Cast the adjusted this to a pointer to vtable pointer and load.
  llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
  CharUnits VTablePtrAlign =
      CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
                                        CGF.getPointerAlign());
  llvm::Value *VTable = CGF.GetVTablePtr(
      Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);

  // Apply the offset.
  // On ARM64, to reserve extra space in virtual member function pointers,
  // we only pay attention to the low 32 bits of the offset.
  llvm::Value *VTableOffset = FnAsInt;
  if (!UseARMMethodPtrABI)
    VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
  if (Use32BitVTableOffsetABI) {
    VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
    VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
  }

  // Check the address of the function pointer if CFI on member function
  // pointers is enabled.
  llvm::Constant *CheckSourceLocation;
  llvm::Constant *CheckTypeDesc;
  bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
                            CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
                           CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitWPDInfo =
      CGM.getCodeGenOpts().WholeProgramVTables &&
      // Don't insert type tests if we are forcing public visibility.
      !CGM.AlwaysHasLTOVisibilityPublic(RD);
  llvm::Value *VirtualFn = nullptr;

  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *TypeId = nullptr;
    llvm::Value *CheckResult = nullptr;

    if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
      // If doing CFI, VFE or WPD, we will need the metadata node to check
      // against.
      llvm::Metadata *MD =
          CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
      TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
    }

    if (ShouldEmitVFEInfo) {
      llvm::Value *VFPAddr =
          Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);

      // If doing VFE, load from the vtable with a type.checked.load intrinsic
      // call. Note that we use the GEP to calculate the address to load from
      // and pass 0 as the offset to the intrinsic. This is because every
      // vtable slot of the correct type is marked with matching metadata, and
      // we know that the load must be from one of these slots.
      llvm::Value *CheckedLoad = Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
          {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
      CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
      VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
    } else {
      // When not doing VFE, emit a normal load, as it allows more
      // optimisations than type.checked.load.
      if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
        llvm::Value *VFPAddr =
            Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
        llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
                                      ? llvm::Intrinsic::type_test
                                      : llvm::Intrinsic::public_type_test;

        CheckResult =
            Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId});
      }

      if (CGM.getItaniumVTableContext().isRelativeLayout()) {
        VirtualFn = CGF.Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::load_relative,
                             {VTableOffset->getType()}),
            {VTable, VTableOffset});
      } else {
        llvm::Value *VFPAddr =
            CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
        VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
                                                  CGF.getPointerAlign(),
                                                  "memptr.virtualfn");
      }
    }
    assert(VirtualFn && "Virtual function pointer not created!");
    assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
            CheckResult) &&
           "Check result required but not created!");

    if (ShouldEmitCFICheck) {
      // If doing CFI, emit the check.
      CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
      CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
        CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
      } else {
        llvm::Value *AllVtables = llvm::MetadataAsValue::get(
            CGM.getLLVMContext(),
            llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
        llvm::Value *ValidVtable = Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
        CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
                      SanitizerHandler::CFICheckFail, StaticData,
                      {VTable, ValidVtable});
      }
    }

    FnVirtual = Builder.GetInsertBlock();
  } // End of sanitizer scope

  CGF.EmitBranch(FnEnd);
  // In the non-virtual path, memptr.ptr really is a function pointer,
  // so we can call it directly.
  CGF.EmitBlock(FnNonVirtual);
  llvm::Value *NonVirtualFn =
      Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");

  // Check the function pointer if CFI on member function pointers is enabled.
  if (ShouldEmitCFICheck) {
    CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
    if (RD->hasDefinition()) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      llvm::Value *Bit = Builder.getFalse();
      for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
        llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
            getContext().getMemberPointerType(
                MPT->getPointeeType(),
                getContext().getRecordType(Base).getTypePtr()));
        llvm::Value *TypeId =
            llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);

        llvm::Value *TypeTest =
            Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                               {NonVirtualFn, TypeId});
        Bit = Builder.CreateOr(Bit, TypeTest);
      }

      CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
                    SanitizerHandler::CFICheckFail, StaticData,
                    {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
    }
    FnNonVirtual = Builder.GetInsertBlock();
  }

  // We're done.
  CGF.EmitBlock(FnEnd);
  llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
  CalleePtr->addIncoming(VirtualFn, FnVirtual);
  CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);

  CGPointerAuthInfo PointerAuth;

  if (const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) {
    llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(CGF.IntPtrTy, 2);
    DiscriminatorPHI->addIncoming(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
                                  FnVirtual);
    const auto &AuthInfo =
        CGM.getMemberFunctionPointerAuthInfo(QualType(MPT, 0));
    assert(Schema.getKey() == AuthInfo.getKey() &&
           "Keys for virtual and non-virtual member functions must match");
    auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator();
    DiscriminatorPHI->addIncoming(NonVirtualDiscriminator, FnNonVirtual);
    PointerAuth = CGPointerAuthInfo(
        Schema.getKey(), Schema.getAuthenticationMode(), Schema.isIsaPointer(),
        Schema.authenticatesNullValues(), DiscriminatorPHI);
  }

  CGCallee Callee(FPT, CalleePtr, PointerAuth);
  return Callee;
}

/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Apply the offset, which we assume is non-null.
  return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.emitRawPointer(CGF), MemPtr,
                                   "memptr.offset");
}

// See if it's possible to return a constant signed pointer.
static llvm::Constant *pointerAuthResignConstant(
    llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo,
    const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) {
  const auto *CPA = dyn_cast<llvm::ConstantPtrAuth>(Ptr);

  if (!CPA)
    return nullptr;

  assert(CPA->getKey()->getZExtValue() == CurAuthInfo.getKey() &&
         CPA->getAddrDiscriminator()->isZeroValue() &&
         CPA->getDiscriminator() == CurAuthInfo.getDiscriminator() &&
         "unexpected key or discriminators");

  return CGM.getConstantSignedPointer(
      CPA->getPointer(), NewAuthInfo.getKey(), nullptr,
      cast<llvm::ConstantInt>(NewAuthInfo.getDiscriminator()));
}

/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///          <-- offset -->             <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
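///
/// As a concrete sketch (layout illustrative): if B is a non-virtual base
/// laid out at offset 8 within D, converting 'int B::*' to 'int D::*' adds
/// 8 to any non-null offset, and the inverse derived-to-base static_cast
/// subtracts 8; the null value -1 is preserved unchanged in both directions.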
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  CGBuilderTy &Builder = CGF.Builder;
  QualType DstType = E->getType();

  if (DstType->isMemberFunctionPointerType()) {
    if (const auto &NewAuthInfo =
            CGM.getMemberFunctionPointerAuthInfo(DstType)) {
      QualType SrcType = E->getSubExpr()->getType();
      assert(SrcType->isMemberFunctionPointerType());
      const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);
      llvm::Value *MemFnPtr = Builder.CreateExtractValue(src, 0, "memptr.ptr");
      llvm::Type *OrigTy = MemFnPtr->getType();

      llvm::BasicBlock *StartBB = Builder.GetInsertBlock();
      llvm::BasicBlock *ResignBB = CGF.createBasicBlock("resign");
      llvm::BasicBlock *MergeBB = CGF.createBasicBlock("merge");

      // Check whether we have a virtual offset or a pointer to a function.
      assert(UseARMMethodPtrABI && "ARM ABI expected");
      llvm::Value *Adj = Builder.CreateExtractValue(src, 1, "memptr.adj");
      llvm::Constant *Ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
      llvm::Value *AndVal = Builder.CreateAnd(Adj, Ptrdiff_1);
      llvm::Value *IsVirtualOffset =
          Builder.CreateIsNotNull(AndVal, "is.virtual.offset");
      Builder.CreateCondBr(IsVirtualOffset, MergeBB, ResignBB);

      CGF.EmitBlock(ResignBB);
      llvm::Type *PtrTy = llvm::PointerType::getUnqual(CGM.Int8Ty);
      MemFnPtr = Builder.CreateIntToPtr(MemFnPtr, PtrTy);
      MemFnPtr =
          CGF.emitPointerAuthResign(MemFnPtr, SrcType, CurAuthInfo, NewAuthInfo,
                                    isa<llvm::Constant>(src));
      MemFnPtr = Builder.CreatePtrToInt(MemFnPtr, OrigTy);
      llvm::Value *ResignedVal = Builder.CreateInsertValue(src, MemFnPtr, 0);
      ResignBB = Builder.GetInsertBlock();

      CGF.EmitBlock(MergeBB);
      llvm::PHINode *NewSrc = Builder.CreatePHI(src->getType(), 2);
      NewSrc->addIncoming(src, StartBB);
      NewSrc->addIncoming(ResignedVal, ResignBB);
      src = NewSrc;
    }
  }

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
      E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

static llvm::Constant *
pointerAuthResignMemberFunctionPointer(llvm::Constant *Src, QualType DestType,
                                       QualType SrcType, CodeGenModule &CGM) {
  assert(DestType->isMemberFunctionPointerType() &&
         SrcType->isMemberFunctionPointerType() &&
         "member function pointers expected");
  if (DestType == SrcType)
    return Src;

  const auto &NewAuthInfo = CGM.getMemberFunctionPointerAuthInfo(DestType);
  const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);

  if (!NewAuthInfo && !CurAuthInfo)
    return Src;

  llvm::Constant *MemFnPtr = Src->getAggregateElement(0u);
  if (MemFnPtr->getNumOperands() == 0) {
    // src must be a pair of null pointers.
    assert(isa<llvm::ConstantInt>(MemFnPtr) && "constant int expected");
    return Src;
  }

  llvm::Constant *ConstPtr = pointerAuthResignConstant(
      cast<llvm::User>(MemFnPtr)->getOperand(0), CurAuthInfo, NewAuthInfo, CGM);
  ConstPtr = llvm::ConstantExpr::getPtrToInt(ConstPtr, MemFnPtr->getType());
  return ConstantFoldInsertValueInstruction(Src, ConstPtr, 0);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  QualType DstType = E->getType();

  if (DstType->isMemberFunctionPointerType())
    src = pointerAuthResignMemberFunctionPointer(
        src, DstType, E->getSubExpr()->getType(), CGM);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
      E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Constant *srcAdj = src->getAggregateElement(1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
  assert(res != nullptr && "Folding must succeed");
  return res;
}

llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
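  // For example, a null 'int A::*' is emitted as ptrdiff_t -1, keeping it
  // distinct from a pointer to a first data member, whose offset is 0.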
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Values[2] = { Zero, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(LangAS::Default));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
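      //
      // For example (illustrative, assuming 8-byte vtable slots and no
      // pointer authentication): a virtual function in vtable slot 2 with a
      // zero this-adjustment is encoded below as { ptr = 16, adj = 1 },
      // since ptr holds the byte offset 2 * 8 and adj = 2 * 0 + 1 carries
      // the virtual flag.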
      //
      // We cannot use the Itanium ABI's representation for virtual member
      // function pointers under pointer authentication because it would
      // require us to store both the virtual offset and the constant
      // discriminator in the pointer, which would be immediately vulnerable
      // to attack. Instead we introduce a thunk that does the virtual dispatch
      // and store it as if it were a non-virtual member function. This means
      // that virtual function pointers may not compare equal anymore, but
      // fortunately they aren't required to by the standard, and we do make
      // a best-effort attempt to re-use the thunk.
      //
      // To support interoperation with code in which pointer authentication
      // is disabled, dereferencing a member function pointer must still handle
      // the virtual case, but it can use a discriminator which should never
      // be valid.
      const auto &Schema =
          CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers;
      if (Schema)
        MemPtr[0] = llvm::ConstantExpr::getPtrToInt(
            getSignedVirtualMemberFunctionPointer(MD), CGM.PtrDiffTy);
      else
        MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      // Don't set the LSB of adj to 1 if pointer authentication for member
      // function pointers is enabled.
      MemPtr[1] = llvm::ConstantInt::get(
          CGM.PtrDiffTy, 2 * ThisAdjustment.getQuantity() + !Schema);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.getMemberFunctionPointer(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
    llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
    QualType SrcType = getContext().getMemberPointerType(
        MD->getType(), MD->getParent()->getTypeForDecl());
    return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM);
  }

  CharUnits FieldOffset =
      getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
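///
/// For example, under Itanium two null member function pointers { 0, a }
/// and { 0, b } must compare equal even when the adjustments a and b
/// differ, which is why the adjustment comparison below is short-circuited
/// by the null test.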
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null. ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
        llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If C++ prohibits us from making a copy, return by address.
  if (!RD->canPassInRegisters()) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
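///
/// For example, a pointer to a class's first data member is the offset 0,
/// i.e. all-zero bits; if null were also all-zero, the two would be
/// indistinguishable. Null data member pointers are therefore -1, and only
/// member *function* pointers ({ 0, 0 }) can be zero-initialized.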
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
                                                        CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE,
                            /*CallOrInvoke=*/nullptr);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}

void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);

  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, {});
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}

void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(
      E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor. If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      // __cxa_throw is declared to take its destructor as void (*)(void *). We
      // must match that if function pointers can be authenticated with a
      // discriminator based on their type.
      const ASTContext &Ctx = getContext();
      QualType DtorTy = Ctx.getFunctionType(Ctx.VoidTy, {Ctx.VoidPtrTy},
                                            FunctionProtoType::ExtProtoInfo());

      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = CGM.getFunctionPointer(Dtor, DtorTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}

static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      global_as const abi::__class_type_info *src,
  //                      global_as const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
  llvm::Type *PtrDiffTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind willreturn readonly.
  llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
  FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
1519 static CharUnits computeOffsetHint(ASTContext &Context,
1520 const CXXRecordDecl *Src,
1521 const CXXRecordDecl *Dst) {
1522 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1523 /*DetectVirtual=*/false);
1525 // If Dst is not derived from Src, we can skip the computation below and just
1526 // report that Src is not a public base of Dst. isDerivedFrom records all paths.
1527 if (!Dst->isDerivedFrom(Src, Paths))
1528 return CharUnits::fromQuantity(-2ULL);
1530 unsigned NumPublicPaths = 0;
1531 CharUnits Offset;
1533 // Now walk all possible inheritance paths.
1534 for (const CXXBasePath &Path : Paths) {
1535 if (Path.Access != AS_public) // Ignore non-public inheritance.
1536 continue;
1538 ++NumPublicPaths;
1540 for (const CXXBasePathElement &PathElement : Path) {
1541 // If the path contains a virtual base class we can't give any hint.
1542 // -1: no hint.
1543 if (PathElement.Base->isVirtual())
1544 return CharUnits::fromQuantity(-1ULL);
1546 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1547 continue;
1549 // Accumulate the base class offsets.
1550 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1551 Offset += L.getBaseClassOffset(
1552 PathElement.Base->getType()->getAsCXXRecordDecl());
1556 // -2: Src is not a public base of Dst.
1557 if (NumPublicPaths == 0)
1558 return CharUnits::fromQuantity(-2ULL);
1560 // -3: Src is a multiple public base type but never a virtual base type.
1561 if (NumPublicPaths > 1)
1562 return CharUnits::fromQuantity(-3ULL);
1564 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1565 // Return the offset of Src from the origin of Dst.
1566 return Offset;
1567 }
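// Worked examples of the hint values (the hierarchies are illustrative,
// not from the ABI document):
//   struct A { virtual ~A(); };
//   struct B : A { };              // hint for A -> B: 0 (unique public base)
//   struct C : virtual A { };      // hint for A -> C: -1 (virtual path)
//   struct D : B { }; struct E : B, D { };
//                                  // hint for A -> E: -3 (two public paths)
//   class F : A { };               // hint for A -> F: -2 (non-public base)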
1569 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1570 // void __cxa_bad_typeid();
1571 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1573 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1576 bool ItaniumCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
1577 return true;
1580 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1581 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1582 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1583 Call->setDoesNotReturn();
1584 CGF.Builder.CreateUnreachable();
1587 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1588 QualType SrcRecordTy,
1589 Address ThisPtr,
1590 llvm::Type *StdTypeInfoPtrTy) {
1591 auto *ClassDecl =
1592 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1593 llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
1594 ClassDecl);
1596 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1597 // Load the type info.
1598 Value = CGF.Builder.CreateCall(
1599 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1600 {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1601 } else {
1602 // Load the type info.
1603 Value =
1604 CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
1606 return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
1607 CGF.getPointerAlign());
1608 }
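// Sketch of the non-relative fast path above (illustrative IR): for
// `typeid(*p)` with a polymorphic pointee, the type_info pointer sits one
// slot before the vtable's address point:
//   %vtable = load ptr, ptr %p
//   %ti.gep = getelementptr inbounds ptr, ptr %vtable, i64 -1
//   %ti     = load ptr, ptr %ti.gep
// The relative-layout branch instead reads a 32-bit offset at -4 bytes via
// llvm.load.relative.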
1610 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1611 QualType SrcRecordTy) {
1612 return SrcIsPtr;
1615 llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
1616 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1617 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1618 llvm::Type *PtrDiffLTy =
1619 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1621 llvm::Value *SrcRTTI =
1622 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1623 llvm::Value *DestRTTI =
1624 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1626 // Compute the offset hint.
1627 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1628 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1629 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1630 PtrDiffLTy,
1631 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1633 // Emit the call to __dynamic_cast.
1634 llvm::Value *Value = ThisAddr.emitRawPointer(CGF);
1635 if (CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) {
1636 // We perform a no-op load of the vtable pointer here to force an
1637 // authentication. In environments that do not support pointer
1638 // authentication this is an actual no-op that will be elided. When
1639 // pointer authentication is supported and enforced on vtable pointers this
1640 // load can trap.
1641 llvm::Value *Vtable =
1642 CGF.GetVTablePtr(ThisAddr, CGM.Int8PtrTy, SrcDecl,
1643 CodeGenFunction::VTableAuthMode::MustTrap);
1644 assert(Vtable);
1645 (void)Vtable;
1648 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1649 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1651 // C++ [expr.dynamic.cast]p9:
1652 //   A failed cast to reference type throws std::bad_cast
1653 if (DestTy->isReferenceType()) {
1654 llvm::BasicBlock *BadCastBlock =
1655 CGF.createBasicBlock("dynamic_cast.bad_cast");
1657 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1658 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1660 CGF.EmitBlock(BadCastBlock);
1661 EmitBadCastCall(CGF);
1664 return Value;
1665 }
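// Sketch of the resulting call (names are illustrative): for
// `dynamic_cast<D*>(b)` where B is a unique public non-virtual base of D at
// offset 0, the hint is 0 and we emit roughly
//   %r = call ptr @__dynamic_cast(ptr %b, ptr @_ZTI1B, ptr @_ZTI1D, i64 0)
// For the reference form `dynamic_cast<D&>(*b)`, the branch above routes a
// null result to __cxa_bad_cast instead of returning it.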
1667 llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
1668 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1669 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
1670 llvm::BasicBlock *CastFail) {
1671 ASTContext &Context = getContext();
1673 // Find all the inheritance paths.
1674 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1675 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1676 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1677 /*DetectVirtual=*/false);
1678 (void)DestDecl->isDerivedFrom(SrcDecl, Paths);
1680 // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
1681 // might appear.
1682 std::optional<CharUnits> Offset;
1683 for (const CXXBasePath &Path : Paths) {
1684 // dynamic_cast only finds public inheritance paths.
1685 if (Path.Access != AS_public)
1686 continue;
1688 CharUnits PathOffset;
1689 for (const CXXBasePathElement &PathElement : Path) {
1690 // Find the offset along this inheritance step.
1691 const CXXRecordDecl *Base =
1692 PathElement.Base->getType()->getAsCXXRecordDecl();
1693 if (PathElement.Base->isVirtual()) {
1694 // For a virtual base class, we know that the derived class is exactly
1695 // DestDecl, so we can use the vbase offset from its layout.
1696 const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
1697 PathOffset = L.getVBaseClassOffset(Base);
1698 } else {
1699 const ASTRecordLayout &L =
1700 Context.getASTRecordLayout(PathElement.Class);
1701 PathOffset += L.getBaseClassOffset(Base);
1705 if (!Offset)
1706 Offset = PathOffset;
1707 else if (Offset != PathOffset) {
1708 // Base appears in at least two different places. Find the most-derived
1709 // object and see if it's a DestDecl. Note that the most-derived object
1710 // must be at least as aligned as this base class subobject, and must
1711 // have a vptr at offset 0.
1712 ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
1713 CGF.VoidPtrTy, ThisAddr.getAlignment());
1714 SrcDecl = DestDecl;
1715 Offset = CharUnits::Zero();
1716 break;
1720 if (!Offset) {
1721 // If there are no public inheritance paths, the cast always fails.
1722 CGF.EmitBranch(CastFail);
1723 return llvm::PoisonValue::get(CGF.VoidPtrTy);
1726 // Compare the vptr against the expected vptr for the destination type at
1727 // this offset. Note that we do not know what type ThisAddr points to when
1728 // the derived class multiply inherits from the base class, so we can't
1729 // use GetVTablePtr; instead, we load the vptr directly.
1730 llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
1731 ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
1732 CGM.DecorateInstructionWithTBAA(
1733 VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
1734 llvm::Value *Success = CGF.Builder.CreateICmpEQ(
1735 VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
1736 llvm::Value *Result = ThisAddr.emitRawPointer(CGF);
1737 if (!Offset->isZero())
1738 Result = CGF.Builder.CreateInBoundsGEP(
1739 CGF.CharTy, Result,
1740 {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
1741 CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
1742 return Result;
1743 }
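// Sketch of the fast path this enables (illustrative IR): when the
// destination class D is effectively final, no runtime call is needed; we
// compare the loaded vptr against D's known address point for the base
// subobject at Offset:
//   %vptr = load ptr, ptr %obj
//   %ok   = icmp eq ptr %vptr, <address point of D's vtable at Offset>
//   br i1 %ok, label %cast.success, label %cast.fail
// with the result pointer adjusted by -Offset on the success path.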
1745 llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
1746 Address ThisAddr,
1747 QualType SrcRecordTy) {
1748 auto *ClassDecl =
1749 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1750 llvm::Value *OffsetToTop;
1751 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1752 // Get the vtable pointer.
1753 llvm::Value *VTable =
1754 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
1756 // Get the offset-to-top from the vtable.
1757 OffsetToTop =
1758 CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
1759 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1760 CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1761 } else {
1762 llvm::Type *PtrDiffLTy =
1763 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1765 // Get the vtable pointer.
1766 llvm::Value *VTable =
1767 CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
1769 // Get the offset-to-top from the vtable.
1770 OffsetToTop =
1771 CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
1772 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1773 PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1775 // Finally, add the offset to the pointer.
1776 return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.emitRawPointer(CGF),
1777 OffsetToTop);
1778 }
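// Sketch (non-relative layout; illustrative IR): `dynamic_cast<void*>(p)`
// reads the offset-to-top stored two slots before the address point and
// adds it to the object pointer to reach the most-derived object:
//   %vtable  = load ptr, ptr %p
//   %ott.gep = getelementptr inbounds i64, ptr %vtable, i64 -2
//   %ott     = load i64, ptr %ott.gep
//   %result  = getelementptr inbounds i8, ptr %p, i64 %ott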
1780 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1781 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1782 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1783 Call->setDoesNotReturn();
1784 CGF.Builder.CreateUnreachable();
1785 return true;
1788 llvm::Value *
1789 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1790 Address This,
1791 const CXXRecordDecl *ClassDecl,
1792 const CXXRecordDecl *BaseClassDecl) {
1793 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1794 CharUnits VBaseOffsetOffset =
1795 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1796 BaseClassDecl);
1797 llvm::Value *VBaseOffsetPtr =
1798 CGF.Builder.CreateConstGEP1_64(
1799 CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
1800 "vbase.offset.ptr");
1802 llvm::Value *VBaseOffset;
1803 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1804 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1805 CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1806 "vbase.offset");
1807 } else {
1808 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1809 CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1811 return VBaseOffset;
1814 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1815 // Just make sure we're in sync with TargetCXXABI.
1816 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1818 // The constructor used for constructing this as a base class;
1819 // ignores virtual bases.
1820 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1822 // The constructor used for constructing this as a complete class;
1823 // constructs the virtual bases, then calls the base constructor.
1824 if (!D->getParent()->isAbstract()) {
1825 // We don't need to emit the complete ctor if the class is abstract.
1826 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1830 CGCXXABI::AddedStructorArgCounts
1831 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1832 SmallVectorImpl<CanQualType> &ArgTys) {
1833 ASTContext &Context = getContext();
1835 // All parameters are already in place except VTT, which goes after 'this'.
1836 // These are Clang types, so we don't need to worry about sret yet.
1838 // Check if we need to add a VTT parameter (which has type global void **).
1839 if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1840 : GD.getDtorType() == Dtor_Base) &&
1841 cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1842 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1843 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
1844 ArgTys.insert(ArgTys.begin() + 1,
1845 Context.getPointerType(CanQualType::CreateUnsafe(Q)));
1846 return AddedStructorArgCounts::prefix(1);
1848 return AddedStructorArgCounts{};
1851 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1852 // The destructor used for destructing this as a base class; ignores
1853 // virtual bases.
1854 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1856 // The destructor used for destructing this as a most-derived class;
1857 // calls the base destructor and then destroys any virtual bases.
1858 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1860 // The destructor in a virtual table is always a 'deleting'
1861 // destructor, which calls the complete destructor and then uses the
1862 // appropriate operator delete.
1863 if (D->isVirtual())
1864 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1865 }
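// For example (`S` is an illustrative name), `struct S { virtual ~S(); };`
// yields three Itanium-mangled symbols: _ZN1SD2Ev (base, D2), _ZN1SD1Ev
// (complete, D1), and _ZN1SD0Ev (deleting, D0); the D1 and D0 variants
// occupy the vtable's two destructor slots.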
1867 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1868 QualType &ResTy,
1869 FunctionArgList &Params) {
1870 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1871 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1873 // Check if we need a VTT parameter as well.
1874 if (NeedsVTTParameter(CGF.CurGD)) {
1875 ASTContext &Context = getContext();
1877 // FIXME: avoid the fake decl
1878 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1879 QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
1880 QualType T = Context.getPointerType(Q);
1881 auto *VTTDecl = ImplicitParamDecl::Create(
1882 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1883 T, ImplicitParamKind::CXXVTT);
1884 Params.insert(Params.begin() + 1, VTTDecl);
1885 getStructorImplicitParamDecl(CGF) = VTTDecl;
1889 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1890 // Naked functions have no prolog.
1891 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1892 return;
1894 // Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1895 // adjustments are required, because they are all handled by thunks.
1896 setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1898 // Initialize the 'vtt' slot if needed.
1899 if (getStructorImplicitParamDecl(CGF)) {
1900 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1901 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1904 // If this is a function that the ABI specifies returns 'this', initialize
1905 // the return slot to 'this' at the start of the function.
1907 // Unlike the setting of return types, this is done within the ABI
1908 // implementation instead of by clients of CGCXXABI because:
1909 // 1) getThisValue is currently protected
1910 // 2) in theory, an ABI could implement 'this' returns some other way;
1911 //    HasThisReturn only specifies a contract, not the implementation
1912 if (HasThisReturn(CGF.CurGD))
1913 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1916 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1917 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1918 bool ForVirtualBase, bool Delegating) {
1919 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1920 return AddedStructorArgs{};
1922 // Insert the implicit 'vtt' argument as the second argument. Make sure to
1923 // correctly reflect its address space, which can differ from generic on
1924 // some targets.
1925 llvm::Value *VTT =
1926 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1927 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
1928 QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
1929 QualType VTTTy = getContext().getPointerType(Q);
1930 return AddedStructorArgs::prefix({{VTT, VTTTy}});
1933 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1934 CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1935 bool ForVirtualBase, bool Delegating) {
1936 GlobalDecl GD(DD, Type);
1937 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1940 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1941 const CXXDestructorDecl *DD,
1942 CXXDtorType Type, bool ForVirtualBase,
1943 bool Delegating, Address This,
1944 QualType ThisTy) {
1945 GlobalDecl GD(DD, Type);
1946 llvm::Value *VTT =
1947 getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1948 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1950 CGCallee Callee;
1951 if (getContext().getLangOpts().AppleKext &&
1952 Type != Dtor_Base && DD->isVirtual())
1953 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1954 else
1955 Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1957 CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy),
1958 ThisTy, VTT, VTTTy, nullptr);
1961 // Check if any non-inline method has the specified attribute.
1962 template <typename T>
1963 static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD) {
1964 for (const auto *D : RD->noload_decls()) {
1965 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1966 if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
1967 FD->isPureVirtual())
1968 continue;
1969 if (D->hasAttr<T>())
1970 return true;
1974 return false;
1977 static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM,
1978 llvm::GlobalVariable *VTable,
1979 const CXXRecordDecl *RD) {
1980 if (VTable->getDLLStorageClass() !=
1981 llvm::GlobalVariable::DefaultStorageClass ||
1982 RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
1983 return;
1985 if (CGM.getVTables().isVTableExternal(RD)) {
1986 if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
1987 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1988 } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
1989 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1992 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1993 const CXXRecordDecl *RD) {
1994 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1995 if (VTable->hasInitializer())
1996 return;
1998 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1999 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
2000 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
2001 llvm::Constant *RTTI =
2002 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
2004 // Create and set the initializer.
2005 ConstantInitBuilder builder(CGM);
2006 auto components = builder.beginStruct();
2007 CGVT.createVTableInitializer(components, VTLayout, RTTI,
2008 llvm::GlobalValue::isLocalLinkage(Linkage));
2009 components.finishAndSetAsInitializer(VTable);
2011 // Set the correct linkage.
2012 VTable->setLinkage(Linkage);
2014 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
2015 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
2017 if (CGM.getTarget().hasPS4DLLImportExport())
2018 setVTableSelectiveDLLImportExport(CGM, VTable, RD);
2020 // Set the right visibility.
2021 CGM.setGVProperties(VTable, RD);
2023 // If this is the magic class __cxxabiv1::__fundamental_type_info,
2024 // we will emit the typeinfo for the fundamental types. This is the
2025 // same behaviour as GCC.
2026 const DeclContext *DC = RD->getDeclContext();
2027 if (RD->getIdentifier() &&
2028 RD->getIdentifier()->isStr("__fundamental_type_info") &&
2029 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
2030 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
2031 DC->getParent()->isTranslationUnit())
2032 EmitFundamentalRTTIDescriptors(RD);
2034 // Always emit type metadata on non-available_externally definitions, and on
2035 // available_externally definitions if we are performing whole program
2036 // devirtualization. For WPD we need the type metadata on all vtable
2037 // definitions to ensure we associate derived classes with base classes
2038 // defined in headers but with a strong definition only in a shared library.
2039 if (!VTable->isDeclarationForLinker() ||
2040 CGM.getCodeGenOpts().WholeProgramVTables) {
2041 CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
2042 // For available_externally definitions, add the vtable to
2043 // @llvm.compiler.used so that it isn't deleted before whole program
2044 // analysis.
2045 if (VTable->isDeclarationForLinker()) {
2046 assert(CGM.getCodeGenOpts().WholeProgramVTables);
2047 CGM.addCompilerUsedGlobal(VTable);
2051 if (VTContext.isRelativeLayout()) {
2052 CGVT.RemoveHwasanMetadata(VTable);
2053 if (!VTable->isDSOLocal())
2054 CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
2058 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
2059 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
2060 if (Vptr.NearestVBase == nullptr)
2061 return false;
2062 return NeedsVTTParameter(CGF.CurGD);
2065 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
2066 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
2067 const CXXRecordDecl *NearestVBase) {
2069 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
2070 NeedsVTTParameter(CGF.CurGD)) {
2071 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
2072 NearestVBase);
2074 return getVTableAddressPoint(Base, VTableClass);
2077 llvm::Constant *
2078 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
2079 const CXXRecordDecl *VTableClass) {
2080 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
2082 // Find the appropriate vtable within the vtable group, and the address point
2083 // within that vtable.
2084 const VTableLayout &Layout =
2085 CGM.getItaniumVTableContext().getVTableLayout(VTableClass);
2086 VTableLayout::AddressPointLocation AddressPoint =
2087 Layout.getAddressPoint(Base);
2088 llvm::Value *Indices[] = {
2089 llvm::ConstantInt::get(CGM.Int32Ty, 0),
2090 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
2091 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
2094 // Add inrange attribute to indicate that only the VTableIndex can be
2095 // accessed.
2096 unsigned ComponentSize =
2097 CGM.getDataLayout().getTypeAllocSize(CGM.getVTableComponentType());
2098 unsigned VTableSize =
2099 ComponentSize * Layout.getVTableSize(AddressPoint.VTableIndex);
2100 unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
2101 llvm::ConstantRange InRange(
2102 llvm::APInt(32, (int)-Offset, true),
2103 llvm::APInt(32, (int)(VTableSize - Offset), true));
2104 return llvm::ConstantExpr::getGetElementPtr(
2105 VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
2106 }
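// Layout sketch for a class with no virtual bases (generic, non-relative
// ABI): the vtable group is a single vtable
//   [ offset-to-top, RTTI pointer, vfn0, vfn1, ... ]
// whose address point is the vfn0 slot, so the GEP above typically has
// VTableIndex 0 and AddressPointIndex 2, with the inrange window covering
// just that one vtable within the group.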
2108 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
2109 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
2110 const CXXRecordDecl *NearestVBase) {
2111 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
2112 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
2114 // Get the secondary vpointer index.
2115 uint64_t VirtualPointerIndex =
2116 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
2118 // Load the VTT.
2119 llvm::Value *VTT = CGF.LoadCXXVTT();
2120 if (VirtualPointerIndex)
2121 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
2122 VirtualPointerIndex);
2124 // And load the address point from the VTT.
2125 llvm::Value *AP =
2126 CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
2127 CGF.getPointerAlign());
2129 if (auto &Schema = CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) {
2130 CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTT,
2131 GlobalDecl(),
2132 QualType());
2133 AP = CGF.EmitPointerAuthAuth(PointerAuth, AP);
2136 return AP;
2139 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
2140 CharUnits VPtrOffset) {
2141 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
2143 llvm::GlobalVariable *&VTable = VTables[RD];
2144 if (VTable)
2145 return VTable;
2147 // Queue up this vtable for possible deferred emission.
2148 CGM.addDeferredVTable(RD);
2150 SmallString<256> Name;
2151 llvm::raw_svector_ostream Out(Name);
2152 getMangleContext().mangleCXXVTable(RD, Out);
2154 const VTableLayout &VTLayout =
2155 CGM.getItaniumVTableContext().getVTableLayout(RD);
2156 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
2158 // Use the alignment of a pointer to a global for the vtable. Otherwise we
2159 // would align it based on the size of the initializer, which doesn't make
2160 // sense because only individual entries are ever loaded.
2161 LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
2162 unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
2163 ? 32
2164 : CGM.getTarget().getPointerAlign(AS);
2166 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
2167 Name, VTableType, llvm::GlobalValue::ExternalLinkage,
2168 getContext().toCharUnitsFromBits(PAlign).getAsAlign());
2169 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2171 if (CGM.getTarget().hasPS4DLLImportExport())
2172 setVTableSelectiveDLLImportExport(CGM, VTable, RD);
2174 CGM.setGVProperties(VTable, RD);
2175 return VTable;
2178 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
2179 GlobalDecl GD,
2180 Address This,
2181 llvm::Type *Ty,
2182 SourceLocation Loc) {
2183 llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
2184 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
2185 llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());
2187 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
2188 llvm::Value *VFunc, *VTableSlotPtr = nullptr;
2189 auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers;
2190 if (!Schema && CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
2191 VFunc = CGF.EmitVTableTypeCheckedLoad(
2192 MethodDecl->getParent(), VTable, PtrTy,
2193 VTableIndex *
2194 CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
2195 8);
2196 } else {
2197 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
2199 llvm::Value *VFuncLoad;
2200 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
2201 VFuncLoad = CGF.Builder.CreateCall(
2202 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
2203 {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
2204 } else {
2205 VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2206 PtrTy, VTable, VTableIndex, "vfn");
2207 VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
2208 CGF.getPointerAlign());
2211 // Add !invariant.load metadata to the virtual function load to indicate
2212 // that the function pointer does not change inside the vtable. It is safe
2213 // to add without -fstrict-vtable-pointers, but it would not help
2214 // devirtualization, because that only matters when there are two loads of
2215 // the same virtual function from the same vtable load, which won't happen
2216 // without devirtualization enabled by -fstrict-vtable-pointers.
2217 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
2218 CGM.getCodeGenOpts().StrictVTablePointers) {
2219 if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
2220 VFuncLoadInstr->setMetadata(
2221 llvm::LLVMContext::MD_invariant_load,
2222 llvm::MDNode::get(CGM.getLLVMContext(),
2223 llvm::ArrayRef<llvm::Metadata *>()));
2226 VFunc = VFuncLoad;
2229 CGPointerAuthInfo PointerAuth;
2230 if (Schema) {
2231 assert(VTableSlotPtr && "virtual function pointer not set");
2232 GD = CGM.getItaniumVTableContext().findOriginalMethod(GD.getCanonicalDecl());
2233 PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTableSlotPtr, GD, QualType());
2235 CGCallee Callee(GD, VFunc, PointerAuth);
2236 return Callee;
2239 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2240 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2241 Address This, DeleteOrMemberCallExpr E, llvm::CallBase **CallOrInvoke) {
2242 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2243 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2244 assert((CE != nullptr) ^ (D != nullptr));
2245 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2246 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2248 GlobalDecl GD(Dtor, DtorType);
2249 const CGFunctionInfo *FInfo =
2250 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2251 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2252 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2254 QualType ThisTy;
2255 if (CE) {
2256 ThisTy = CE->getObjectType();
2257 } else {
2258 ThisTy = D->getDestroyedType();
2261 CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy,
2262 nullptr, QualType(), nullptr, CallOrInvoke);
2263 return nullptr;
2266 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2267 CodeGenVTables &VTables = CGM.getVTables();
2268 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2269 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2272 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2273 const CXXRecordDecl *RD) const {
2274 // We don't emit available_externally vtables if we are in -fapple-kext mode
2275 // because kext mode does not permit devirtualization.
2276 if (CGM.getLangOpts().AppleKext)
2277 return false;
2279 // If the vtable is hidden then it is not safe to emit an available_externally
2280 // copy of vtable.
2281 if (isVTableHidden(RD))
2282 return false;
2284 if (CGM.getCodeGenOpts().ForceEmitVTables)
2285 return true;
2287 // A speculative vtable can only be generated if all virtual inline functions
2288 // defined by this class are emitted. The vtable in the final program contains,
2289 // for each virtual inline function not used in the current TU, a function
2290 // that is equivalent to the unused function. The function in the actual
2291 // vtable does not have to be declared under the same symbol (e.g., a virtual
2292 // destructor that can be substituted with its base class's destructor). Since
2293 // inline functions are emitted lazily, and this emission does not account for
2294 // speculative emission of a vtable, we might generate a speculative vtable
2295 // with references to inline functions that are not emitted under that name.
2296 // Devirtualizing a call to such a function can then result in linking
2297 // errors. Hence, if there are any unused virtual inline functions, we
2298 // cannot emit the speculative vtable.
2299 // FIXME: we could still emit a copy of the vtable if we can emit
2300 // definitions of the inline functions.
2301 if (hasAnyUnusedVirtualInlineFunction(RD))
2302 return false;
2304 // For a class with virtual bases, we must also be able to speculatively
2305 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2306 // the vtable" and "can emit the VTT". For a base subobject, this means we
2307 // need to be able to emit non-virtual base vtables.
2308 if (RD->getNumVBases()) {
2309 for (const auto &B : RD->bases()) {
2310 auto *BRD = B.getType()->getAsCXXRecordDecl();
2311 assert(BRD && "no class for base specifier");
2312 if (B.isVirtual() || !BRD->isDynamicClass())
2313 continue;
2314 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2315 return false;
2319 return true;
2322 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2323 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2324 return false;
2326 if (RD->shouldEmitInExternalSource())
2327 return false;
2329 // For a complete-object vtable (or more specifically, for the VTT), we need
2330 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2331 for (const auto &B : RD->vbases()) {
2332 auto *BRD = B.getType()->getAsCXXRecordDecl();
2333 assert(BRD && "no class for base specifier");
2334 if (!BRD->isDynamicClass())
2335 continue;
2336 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2337 return false;
2340 return true;
2342 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2343 Address InitialPtr,
2344 const CXXRecordDecl *UnadjustedClass,
2345 int64_t NonVirtualAdjustment,
2346 int64_t VirtualAdjustment,
2347 bool IsReturnAdjustment) {
2348 if (!NonVirtualAdjustment && !VirtualAdjustment)
2349 return InitialPtr.emitRawPointer(CGF);
2351 Address V = InitialPtr.withElementType(CGF.Int8Ty);
2353 // In a base-to-derived cast, the non-virtual adjustment is applied first.
2354 if (NonVirtualAdjustment && !IsReturnAdjustment) {
2355 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2356 CharUnits::fromQuantity(NonVirtualAdjustment));
2359 // Perform the virtual adjustment if we have one.
2360 llvm::Value *ResultPtr;
2361 if (VirtualAdjustment) {
2362 llvm::Value *VTablePtr =
2363 CGF.GetVTablePtr(V, CGF.Int8PtrTy, UnadjustedClass);
2365 llvm::Value *Offset;
2366 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2367 CGF.Int8Ty, VTablePtr, VirtualAdjustment);
2368 if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2369 // Load the adjustment offset from the vtable as a 32-bit int.
2370 Offset =
2371 CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
2372 CharUnits::fromQuantity(4));
2373 } else {
2374 llvm::Type *PtrDiffTy =
2375 CGF.ConvertType(CGF.getContext().getPointerDiffType());
2377 // Load the adjustment offset from the vtable.
2378 Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
2379 CGF.getPointerAlign());
2381 // Adjust our pointer.
2382 ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getElementType(),
2383 V.emitRawPointer(CGF), Offset);
2384 } else {
2385 ResultPtr = V.emitRawPointer(CGF);
2388 // In a derived-to-base conversion, the non-virtual adjustment is
2389 // applied second.
2390 if (NonVirtualAdjustment && IsReturnAdjustment) {
2391 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
2392 NonVirtualAdjustment);
2395 return ResultPtr;
2398 llvm::Value *
2399 ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, Address This,
2400 const CXXRecordDecl *UnadjustedClass,
2401 const ThunkInfo &TI) {
2402 return performTypeAdjustment(CGF, This, UnadjustedClass, TI.This.NonVirtual,
2403 TI.This.Virtual.Itanium.VCallOffsetOffset,
2404 /*IsReturnAdjustment=*/false);
2407 llvm::Value *
2408 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2409 const CXXRecordDecl *UnadjustedClass,
2410 const ReturnAdjustment &RA) {
2411 return performTypeAdjustment(CGF, Ret, UnadjustedClass, RA.NonVirtual,
2412 RA.Virtual.Itanium.VBaseOffsetOffset,
2413 /*IsReturnAdjustment=*/true);
2414 }
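// Example of a non-virtual `this` adjustment (a sketch; offsets assume a
// typical LP64 layout, and the classes are illustrative):
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { void g() override; };
// The thunk for C::g in the B-in-C vtable shifts `this` by -8 before
// tail-calling C::g; a virtual adjustment instead loads a vcall or vbase
// offset from the vtable, as performTypeAdjustment does above.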
2416 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2417 RValue RV, QualType ResultType) {
2418 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2419 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2421 // Destructor thunks in the ARM ABI have indeterminate results.
2422 llvm::Type *T = CGF.ReturnValue.getElementType();
2423 RValue Undef = RValue::get(llvm::UndefValue::get(T));
2424 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2427 /************************** Array allocation cookies **************************/
2429 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2430 // The array cookie is a size_t; pad that up to the element alignment.
2431 // The cookie is actually right-justified in that space.
2432 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2433 CGM.getContext().getPreferredTypeAlignInChars(elementType));
2436 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2437 Address NewPtr,
2438 llvm::Value *NumElements,
2439 const CXXNewExpr *expr,
2440 QualType ElementType) {
2441 assert(requiresArrayCookie(expr));
2443 unsigned AS = NewPtr.getAddressSpace();
2445 ASTContext &Ctx = getContext();
2446 CharUnits SizeSize = CGF.getSizeSize();
2448 // The size of the cookie.
2449 CharUnits CookieSize =
2450 std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2451 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2453 // Compute an offset to the cookie.
2454 Address CookiePtr = NewPtr;
2455 CharUnits CookieOffset = CookieSize - SizeSize;
2456 if (!CookieOffset.isZero())
2457 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2459 // Write the number of elements into the appropriate slot.
2460 Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
2461 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2463 // Handle the array cookie specially in ASan.
2464 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2465 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2466 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2467 // The store to the CookiePtr does not need to be instrumented.
2468 SI->setNoSanitizeMetadata();
2469 llvm::FunctionType *FTy =
2470 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2471 llvm::FunctionCallee F =
2472 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2473 CGF.Builder.CreateCall(F, NumElementsPtr.emitRawPointer(CGF));
2476 // Finally, compute a pointer to the actual data buffer by skipping
2477 // over the cookie completely.
2478 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2479 }
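// Resulting allocation layout for `new T[n]` when a cookie is required (a
// sketch; assumes alignof(T) <= sizeof(size_t)):
//   [ size_t n ][ T[0] ][ T[1] ] ...
//     ^ cookie    ^ pointer returned to the caller
// For over-aligned T the cookie is padded on the left, keeping `n`
// immediately before the first element (right-justified, as noted above).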
2481 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2482 Address allocPtr,
2483 CharUnits cookieSize) {
2484 // The number of elements is right-justified in the cookie.
2485 Address numElementsPtr = allocPtr;
2486 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2487 if (!numElementsOffset.isZero())
2488 numElementsPtr =
2489 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2491 unsigned AS = allocPtr.getAddressSpace();
2492 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2493 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2494 return CGF.Builder.CreateLoad(numElementsPtr);
2495 // In asan mode emit a function call instead of a regular load and let the
2496 // run-time deal with it: if the shadow is properly poisoned return the
2497 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2498 // We can't simply ignore this load using nosanitize metadata because
2499 // the metadata may be lost.
2500 llvm::FunctionType *FTy =
2501 llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
2502 llvm::FunctionCallee F =
2503 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2504 return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
2507 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2508 // ARM says that the cookie is always:
2509 // struct array_cookie {
2510 // std::size_t element_size; // element_size != 0
2511 // std::size_t element_count;
2512 // };
2513 // But the base ABI doesn't give anything an alignment greater than
2514 // 8, so we can dismiss this as typical ABI-author blindness to
2515 // actual language complexity and round up to the element alignment.
2516 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2517 CGM.getContext().getTypeAlignInChars(elementType));
2520 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2521 Address newPtr,
2522 llvm::Value *numElements,
2523 const CXXNewExpr *expr,
2524 QualType elementType) {
2525 assert(requiresArrayCookie(expr));
2527 // The cookie is always at the start of the buffer.
2528 Address cookie = newPtr;
2530 // The first element is the element size.
2531 cookie = cookie.withElementType(CGF.SizeTy);
2532 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2533 getContext().getTypeSizeInChars(elementType).getQuantity());
2534 CGF.Builder.CreateStore(elementSize, cookie);
2536 // The second element is the element count.
2537 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2538 CGF.Builder.CreateStore(numElements, cookie);
2540 // Finally, compute a pointer to the actual data buffer by skipping
2541 // over the cookie completely.
2542 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2543 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2546 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2547 Address allocPtr,
2548 CharUnits cookieSize) {
2549 // The number of elements is at offset sizeof(size_t) relative to
2550 // the allocated pointer.
2551 Address numElementsPtr
2552 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2554 numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
2555 return CGF.Builder.CreateLoad(numElementsPtr);
2558 /*********************** Static local initialization **************************/
2560 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2561 llvm::PointerType *GuardPtrTy) {
2562 // int __cxa_guard_acquire(__guard *guard_object);
2563 llvm::FunctionType *FTy =
2564 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2565 GuardPtrTy, /*isVarArg=*/false);
2566 return CGM.CreateRuntimeFunction(
2567 FTy, "__cxa_guard_acquire",
2568 llvm::AttributeList::get(CGM.getLLVMContext(),
2569 llvm::AttributeList::FunctionIndex,
2570 llvm::Attribute::NoUnwind));
2573 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2574 llvm::PointerType *GuardPtrTy) {
2575 // void __cxa_guard_release(__guard *guard_object);
2576 llvm::FunctionType *FTy =
2577 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2578 return CGM.CreateRuntimeFunction(
2579 FTy, "__cxa_guard_release",
2580 llvm::AttributeList::get(CGM.getLLVMContext(),
2581 llvm::AttributeList::FunctionIndex,
2582 llvm::Attribute::NoUnwind));
2585 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2586 llvm::PointerType *GuardPtrTy) {
2587 // void __cxa_guard_abort(__guard *guard_object);
2588 llvm::FunctionType *FTy =
2589 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2590 return CGM.CreateRuntimeFunction(
2591 FTy, "__cxa_guard_abort",
2592 llvm::AttributeList::get(CGM.getLLVMContext(),
2593 llvm::AttributeList::FunctionIndex,
2594 llvm::Attribute::NoUnwind));
2597 namespace {
2598 struct CallGuardAbort final : EHScopeStack::Cleanup {
2599 llvm::GlobalVariable *Guard;
2600 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2602 void Emit(CodeGenFunction &CGF, Flags flags) override {
2603 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2604 Guard);
2609 /// The ARM code here follows the Itanium code closely enough that we
2610 /// just special-case it at particular places.
2611 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2612 const VarDecl &D,
2613 llvm::GlobalVariable *var,
2614 bool shouldPerformInit) {
2615 CGBuilderTy &Builder = CGF.Builder;
2617 // Inline variables that weren't instantiated from variable templates have
2618 // partially-ordered initialization within their translation unit.
2619 bool NonTemplateInline =
2620 D.isInline() &&
2621 !isTemplateInstantiation(D.getTemplateSpecializationKind());
2623 // We only need to use thread-safe statics for local non-TLS variables and
2624 // inline variables; other global initialization is always single-threaded
2625 // or (through lazy dynamic loading in multiple threads) unsequenced.
2626 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2627 (D.isLocalVarDecl() || NonTemplateInline) &&
2628 !D.getTLSKind();
2630 // If we have a global variable with internal linkage and thread-safe statics
2631 // are disabled, we can just let the guard variable be of type i8.
2632 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2634 llvm::IntegerType *guardTy;
2635 CharUnits guardAlignment;
2636 if (useInt8GuardVariable) {
2637 guardTy = CGF.Int8Ty;
2638 guardAlignment = CharUnits::One();
2639 } else {
2640 // Guard variables are 64 bits in the generic ABI and size_t-width on ARM
2641 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2642 if (UseARMGuardVarABI) {
2643 guardTy = CGF.SizeTy;
2644 guardAlignment = CGF.getSizeAlign();
2645 } else {
2646 guardTy = CGF.Int64Ty;
2647 guardAlignment =
2648 CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
2651 llvm::PointerType *guardPtrTy = llvm::PointerType::get(
2652 CGF.CGM.getLLVMContext(),
2653 CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2655 // Create the guard variable if we don't already have it (as we
2656 // might if we're double-emitting this function body).
2657 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2658 if (!guard) {
2659 // Mangle the name for the guard.
2660 SmallString<256> guardName;
2662 llvm::raw_svector_ostream out(guardName);
2663 getMangleContext().mangleStaticGuardVariable(&D, out);
2666 // Create the guard variable with a zero-initializer.
2667 // Just absorb linkage, visibility and dll storage class from the guarded
2668 // variable.
2669 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2670 false, var->getLinkage(),
2671 llvm::ConstantInt::get(guardTy, 0),
2672 guardName.str());
2673 guard->setDSOLocal(var->isDSOLocal());
2674 guard->setVisibility(var->getVisibility());
2675 guard->setDLLStorageClass(var->getDLLStorageClass());
2676 // If the variable is thread-local, so is its guard variable.
2677 guard->setThreadLocalMode(var->getThreadLocalMode());
2678 guard->setAlignment(guardAlignment.getAsAlign());
2680 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2681 // group as the associated data object." In practice, this doesn't work for
2682 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2683 llvm::Comdat *C = var->getComdat();
2684 if (!D.isLocalVarDecl() && C &&
2685 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2686 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2687 guard->setComdat(C);
2688 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2689 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2692 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2695 Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
2697 // Test whether the variable has completed initialization.
2699 // Itanium C++ ABI 3.3.2:
2700 // The following is pseudo-code showing how these functions can be used:
2701 // if (obj_guard.first_byte == 0) {
2702 // if ( __cxa_guard_acquire (&obj_guard) ) {
2703 // try {
2704 // ... initialize the object ...;
2705 // } catch (...) {
2706 // __cxa_guard_abort (&obj_guard);
2707 // throw;
2708 // }
2709 // ... queue object destructor with __cxa_atexit() ...;
2710 // __cxa_guard_release (&obj_guard);
2711 // }
2712 // }
2714 // If threadsafe statics are enabled, but we don't have inline atomics, just
2715 // call __cxa_guard_acquire unconditionally. The "inline" check isn't
2716 // actually inline, and the user might not expect calls to __atomic libcalls.
2718 unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
2719 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2720 if (!threadsafe || MaxInlineWidthInBits) {
2721 // Load the first byte of the guard variable.
2722 llvm::LoadInst *LI =
2723 Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));
2725 // Itanium ABI:
2726 // An implementation supporting thread-safety on multiprocessor
2727 // systems must also guarantee that references to the initialized
2728 // object do not occur before the load of the initialization flag.
2730 // In LLVM, we do this by marking the load Acquire.
2731 if (threadsafe)
2732 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2734 // For ARM, we should only check the first bit, rather than the entire byte:
2736 // ARM C++ ABI 3.2.3.1:
2737 // To support the potential use of initialization guard variables
2738 // as semaphores that are the target of ARM SWP and LDREX/STREX
2739 // synchronizing instructions we define a static initialization
2740 // guard variable to be a 4-byte aligned, 4-byte word with the
2741 // following inline access protocol.
2742 // #define INITIALIZED 1
2743 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2744 // if (__cxa_guard_acquire(&obj_guard))
2745 // ...
2746 // }
2748 // and similarly for ARM64:
2750 // ARM64 C++ ABI 3.2.2:
2751 // This ABI instead only specifies the value bit 0 of the static guard
2752 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2753 // variable is not initialized and 1 when it is.
2754 llvm::Value *V =
2755 (UseARMGuardVarABI && !useInt8GuardVariable)
2756 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2757 : LI;
2758 llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2760 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2762 // Check if the first byte of the guard variable is zero.
2763 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2764 CodeGenFunction::GuardKind::VariableGuard, &D);
2766 CGF.EmitBlock(InitCheckBlock);
2769 // The semantics of dynamic initialization of variables with static or thread
2770 // storage duration depends on whether they are declared at block-scope. The
2771 // initialization of such variables at block-scope can be aborted with an
2772 // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
2773 // to their initialization has undefined behavior (also per C++20
2774 // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
2775 // lead to termination (per C++20 [except.terminate]p1), and recursive
2776 // references to the variables are governed only by the lifetime rules (per
2777 // C++20 [class.cdtor]p2), which means such references are perfectly fine as
2778 // long as they avoid touching memory. As a result, block-scope variables must
2779 // not be marked as initialized until after initialization completes (unless
2780 // the mark is reverted following an exception), but non-block-scope variables
2781 // must be marked prior to initialization so that recursive accesses during
2782 // initialization do not restart initialization.
2784 // Variables used when coping with thread-safe statics and exceptions.
2785 if (threadsafe) {
2786 // Call __cxa_guard_acquire.
2787 llvm::Value *V
2788 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2790 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2792 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2793 InitBlock, EndBlock);
2795 // Call __cxa_guard_abort along the exceptional edge.
2796 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2798 CGF.EmitBlock(InitBlock);
2799 } else if (!D.isLocalVarDecl()) {
2800 // For non-local variables, store 1 into the first byte of the guard
2801 // variable before the object initialization begins so that references
2802 // to the variable during initialization don't restart initialization.
2803 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2804 guardAddr.withElementType(CGM.Int8Ty));
2807 // Emit the initializer and add a global destructor if appropriate.
2808 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2810 if (threadsafe) {
2811 // Pop the guard-abort cleanup if we pushed one.
2812 CGF.PopCleanupBlock();
2814 // Call __cxa_guard_release. This cannot throw.
2815 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2816 guardAddr.emitRawPointer(CGF));
2817 } else if (D.isLocalVarDecl()) {
2818 // For local variables, store 1 into the first byte of the guard variable
2819 // after the object initialization completes so that initialization is
2820 // retried if initialization is interrupted by an exception.
2821 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2822 guardAddr.withElementType(CGM.Int8Ty));
2825 CGF.EmitBlock(EndBlock);
2826 }
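// For reference (a sketch; `f` and `Widget` are illustrative): the guard
// for `void f() { static Widget w; }` is mangled _ZGVZ1fvE1w, an i64 in the
// generic ABI. With thread-safe statics the fast path is just the acquire
// load of its first byte; __cxa_guard_acquire/__cxa_guard_release are only
// reached when that byte is still 0.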
2828 /// Register a global destructor using __cxa_atexit.
2829 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2830 llvm::FunctionCallee dtor,
2831 llvm::Constant *addr, bool TLS) {
2832 assert(!CGF.getTarget().getTriple().isOSAIX() &&
2833 "unexpected call to emitGlobalDtorWithCXAAtExit");
2834 assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2835 "__cxa_atexit is disabled");
2836 const char *Name = "__cxa_atexit";
2837 if (TLS) {
2838 const llvm::Triple &T = CGF.getTarget().getTriple();
2839 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2842 // We're assuming that the destructor function is something we can
2843 // reasonably call with the default CC.
2844 llvm::Type *dtorTy = CGF.UnqualPtrTy;
2846 // Preserve address space of addr.
2847 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2848 auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
2849 : CGF.Int8PtrTy;
2851 // Create a variable that binds the atexit to this shared object.
2852 llvm::Constant *handle =
2853 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2854 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2855 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2857 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2858 llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
2859 llvm::FunctionType *atexitTy =
2860 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2862 // Fetch the actual function.
2863 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2864 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2865 fn->setDoesNotThrow();
2867 const auto &Context = CGF.CGM.getContext();
2868 FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
2869 /*IsVariadic=*/false, /*IsCXXMethod=*/false));
2870 QualType fnType =
2871 Context.getFunctionType(Context.VoidTy, {Context.VoidPtrTy}, EPI);
2872 llvm::Constant *dtorCallee = cast<llvm::Constant>(dtor.getCallee());
2873 dtorCallee = CGF.CGM.getFunctionPointer(dtorCallee, fnType);
2875 if (!addr)
2876 // addr is null when we are trying to register a dtor annotated with
2877 // __attribute__((destructor)) in a constructor function. Using null here is
2878 // okay because this argument is just passed back to the destructor
2879 // function.
2880 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2882 llvm::Value *args[] = {dtorCallee, addr, handle};
2883 CGF.EmitNounwindRuntimeCall(atexit, args);
2884 }
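// Sketch of a registration this emits for a global `Widget g;` (names are
// illustrative): from g's initializer function we get roughly
//   call i32 @__cxa_atexit(ptr @_ZN6WidgetD1Ev, ptr @g, ptr @__dso_handle)
// so the destructor runs with &g at exit (or when this DSO is unloaded);
// the TLS flavor swaps in __cxa_thread_atexit / _tlv_atexit, as above.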
2886 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2887 StringRef FnName) {
2888 // Create a function that registers/unregisters destructors that have the same
2889 // priority.
2890 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2891 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2892 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2894 return GlobalInitOrCleanupFn;
2897 void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2898 for (const auto &I : DtorsUsingAtExit) {
2899 int Priority = I.first;
2900 std::string GlobalCleanupFnName =
2901 std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
2903 llvm::Function *GlobalCleanupFn =
2904 createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
2906 CodeGenFunction CGF(*this);
2907 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
2908 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2909 SourceLocation(), SourceLocation());
2910 auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2912 // Get the destructor function type, void(*)(void).
2913 llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
2915 // Destructor functions are run/unregistered in non-ascending
2916 // order of their priorities.
2917 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2918 auto itv = Dtors.rbegin();
2919 while (itv != Dtors.rend()) {
2920 llvm::Function *Dtor = *itv;
2922 // We're assuming that the destructor function is something we can
2923 // reasonably call with the correct CC.
2924 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
2925 llvm::Value *NeedsDestruct =
2926 CGF.Builder.CreateIsNull(V, "needs_destruct");
2928 llvm::BasicBlock *DestructCallBlock =
2929 CGF.createBasicBlock("destruct.call");
2930 llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
2931 (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
2932 // Check if unatexit returns a value of 0. If it does, jump to
2933 // DestructCallBlock; otherwise jump to EndBlock directly.
2934 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
2936 CGF.EmitBlock(DestructCallBlock);
2938 // Emit the call to casted Dtor.
2939 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
2940 // Make sure the call and the callee agree on calling convention.
2941 CI->setCallingConv(Dtor->getCallingConv());
2943 CGF.EmitBlock(EndBlock);
2945 itv++;
2948 CGF.FinishFunction();
2949 AddGlobalDtor(GlobalCleanupFn, Priority);
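// A sketch of what the loop above generates (names hypothetical): for
// priority 65535 with destructors D1 and D2 registered in that order,
//
//   void __GLOBAL_cleanup_65535() {
//     if (unatexit(D2) == 0) D2();  // registered last, unregistered first
//     if (unatexit(D1) == 0) D1();
//   }
//
// where a zero return from unatexit means the destructor was still
// registered and therefore must be run here.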
2953 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2954 for (const auto &I : DtorsUsingAtExit) {
2955 int Priority = I.first;
2956 std::string GlobalInitFnName =
2957 std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2958 llvm::Function *GlobalInitFn =
2959 createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
2961 CodeGenFunction CGF(*this);
2962 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
2963 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2964 SourceLocation(), SourceLocation());
2965 auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2967 // Since constructor functions are run in non-descending order of their
2968 // priorities, destructors are registered in non-descending order of their
2969 // priorities, and since destructor functions are run in the reverse order
2970 // of their registration, destructor functions are run in non-ascending
2971 // order of their priorities.
2972 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2973 for (auto *Dtor : Dtors) {
2974 // Register the destructor function calling __cxa_atexit if it is
2975 // available. Otherwise fall back on calling atexit.
2976 if (getCodeGenOpts().CXAAtExit) {
2977 emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2978 } else {
2979 // We're assuming that the destructor function is something we can
2980 // reasonably call with the correct CC.
2981 CGF.registerGlobalDtorWithAtExit(Dtor);
2985 CGF.FinishFunction();
2986 AddGlobalCtor(GlobalInitFn, Priority);
2989 if (getCXXABI().useSinitAndSterm())
2990 unregisterGlobalDtorsWithUnAtExit();
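// A sketch of the matching init function generated above (names
// hypothetical), assuming __cxa_atexit is available:
//
//   void __GLOBAL_init_65535() {
//     __cxa_atexit(D1, /*p=*/nullptr, &__dso_handle);  // else atexit(D1)
//     __cxa_atexit(D2, /*p=*/nullptr, &__dso_handle);
//   }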
2993 /// Register a global destructor as best as we know how.
2994 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2995 llvm::FunctionCallee dtor,
2996 llvm::Constant *addr) {
2997 if (D.isNoDestroy(CGM.getContext()))
2998 return;
3000 // HLSL doesn't support atexit.
3001 if (CGM.getLangOpts().HLSL)
3002 return CGM.AddCXXDtorEntry(dtor, addr);
3004 // OpenMP offloading supports C++ constructors and destructors but we do not
3005 // always have 'atexit' available. Instead lower these to use the LLVM global
3006 // destructors which we can handle directly in the runtime. Note that this is
3007 // not strictly 1-to-1 with using `atexit` because we no longer tear down
3008 // globals in reverse order of when they were constructed.
3009 if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
3010 return CGF.registerGlobalDtorWithLLVM(D, dtor, addr);
3012 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
3013 // or __cxa_atexit depending on whether this VarDecl is thread-local or not.
3014 // CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
3015 // We can always use __cxa_thread_atexit.
3016 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
3017 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
3019 // In Apple kexts, we want to add a global destructor entry.
3020 // FIXME: shouldn't this be guarded by some variable?
3021 if (CGM.getLangOpts().AppleKext) {
3022 // Generate a global destructor entry.
3023 return CGM.AddCXXDtorEntry(dtor, addr);
3026 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
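// For example, assuming -fuse-cxa-atexit and these hypothetical file-scope
// declarations, the dispatch above resolves as:
//
//   static Foo a;                        // -> __cxa_atexit
//   thread_local Foo b;                  // -> __cxa_thread_atexit
//   [[clang::no_destroy]] static Foo c;  // -> nothing registered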
3029 static bool isThreadWrapperReplaceable(const VarDecl *VD,
3030 CodeGen::CodeGenModule &CGM) {
3031 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
3032 // Darwin prefers references to thread local variables to go through
3033 // the thread wrapper instead of directly referencing the backing variable.
3034 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3035 CGM.getTarget().getTriple().isOSDarwin();
3038 /// Get the appropriate linkage for the wrapper function. This is essentially
3039 /// the weak form of the variable's linkage; every translation unit which needs
3040 /// the wrapper emits a copy, and we want the linker to merge them.
3041 static llvm::GlobalValue::LinkageTypes
3042 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
3043 llvm::GlobalValue::LinkageTypes VarLinkage =
3044 CGM.getLLVMLinkageVarDefinition(VD);
3046 // For internal linkage variables, we don't need an external or weak wrapper.
3047 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
3048 return VarLinkage;
3050 // If the thread wrapper is replaceable, give it appropriate linkage.
3051 if (isThreadWrapperReplaceable(VD, CGM))
3052 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
3053 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
3054 return VarLinkage;
3055 return llvm::GlobalValue::WeakODRLinkage;
3058 llvm::Function *
3059 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
3060 llvm::Value *Val) {
3061 // Mangle the name for the thread_local wrapper function.
3062 SmallString<256> WrapperName;
3064 llvm::raw_svector_ostream Out(WrapperName);
3065 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
3068 // FIXME: If VD is a definition, we should regenerate the function attributes
3069 // before returning.
3070 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
3071 return cast<llvm::Function>(V);
3073 QualType RetQT = VD->getType();
3074 if (RetQT->isReferenceType())
3075 RetQT = RetQT.getNonReferenceType();
3077 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
3078 getContext().getPointerType(RetQT), FunctionArgList());
3080 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
3081 llvm::Function *Wrapper =
3082 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
3083 WrapperName.str(), &CGM.getModule());
3085 if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
3086 Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
3088 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
3090 // Always resolve references to the wrapper at link time.
3091 if (!Wrapper->hasLocalLinkage())
3092 if (!isThreadWrapperReplaceable(VD, CGM) ||
3093 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
3094 llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
3095 VD->getVisibility() == HiddenVisibility)
3096 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
3098 if (isThreadWrapperReplaceable(VD, CGM)) {
3099 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3100 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
3103 ThreadWrappers.push_back({VD, Wrapper});
3104 return Wrapper;
3107 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
3108 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
3109 ArrayRef<llvm::Function *> CXXThreadLocalInits,
3110 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
3111 llvm::Function *InitFunc = nullptr;
3113 // Separate initializers into those with ordered (or partially-ordered)
3114 // initialization and those with unordered initialization.
3115 llvm::SmallVector<llvm::Function *, 8> OrderedInits;
3116 llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
3117 for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
3118 if (isTemplateInstantiation(
3119 CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
3120 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
3121 CXXThreadLocalInits[I];
3122 else
3123 OrderedInits.push_back(CXXThreadLocalInits[I]);
3126 if (!OrderedInits.empty()) {
3127 // Generate a guarded initialization function.
3128 llvm::FunctionType *FTy =
3129 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
3130 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
3131 InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
3132 SourceLocation(),
3133 /*TLS=*/true);
3134 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
3135 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
3136 llvm::GlobalVariable::InternalLinkage,
3137 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
3138 Guard->setThreadLocal(true);
3139 Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
3141 CharUnits GuardAlign = CharUnits::One();
3142 Guard->setAlignment(GuardAlign.getAsAlign());
3144 CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
3145 InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
3146 // On Darwin platforms, use CXX_FAST_TLS calling convention.
3147 if (CGM.getTarget().getTriple().isOSDarwin()) {
3148 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3149 InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
3153 // Create declarations for thread wrappers for all thread-local variables
3154 // with non-discardable definitions in this translation unit.
3155 for (const VarDecl *VD : CXXThreadLocals) {
3156 if (VD->hasDefinition() &&
3157 !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
3158 llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
3159 getOrCreateThreadLocalWrapper(VD, GV);
3163 // Emit all referenced thread wrappers.
3164 for (auto VDAndWrapper : ThreadWrappers) {
3165 const VarDecl *VD = VDAndWrapper.first;
3166 llvm::GlobalVariable *Var =
3167 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
3168 llvm::Function *Wrapper = VDAndWrapper.second;
3170 // Some targets require that all accesses to thread local variables go through
3171 // the thread wrapper. This means that we cannot attempt to create a thread
3172 // wrapper or a thread helper.
3173 if (!VD->hasDefinition()) {
3174 if (isThreadWrapperReplaceable(VD, CGM)) {
3175 Wrapper->setLinkage(llvm::Function::ExternalLinkage);
3176 continue;
3179 // If this isn't a TU in which this variable is defined, the thread
3180 // wrapper is discardable.
3181 if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
3182 Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
3185 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
3187 // Mangle the name for the thread_local initialization function.
3188 SmallString<256> InitFnName;
3190 llvm::raw_svector_ostream Out(InitFnName);
3191 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
3194 llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
3196 // If we have a definition for the variable, emit the initialization
3197 // function as an alias to the global Init function (if any). Otherwise,
3198 // produce a declaration of the initialization function.
3199 llvm::GlobalValue *Init = nullptr;
3200 bool InitIsInitFunc = false;
3201 bool HasConstantInitialization = false;
3202 if (!usesThreadWrapperFunction(VD)) {
3203 HasConstantInitialization = true;
3204 } else if (VD->hasDefinition()) {
3205 InitIsInitFunc = true;
3206 llvm::Function *InitFuncToUse = InitFunc;
3207 if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
3208 InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
3209 if (InitFuncToUse)
3210 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
3211 InitFuncToUse);
3212 } else {
3213 // Emit a weak global function referring to the initialization function.
3214 // This function will not exist if the TU defining the thread_local
3215 // variable in question does not need any dynamic initialization for
3216 // its thread_local variables.
3217 Init = llvm::Function::Create(InitFnTy,
3218 llvm::GlobalVariable::ExternalWeakLinkage,
3219 InitFnName.str(), &CGM.getModule());
3220 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
3221 CGM.SetLLVMFunctionAttributes(
3222 GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
3225 if (Init) {
3226 Init->setVisibility(Var->getVisibility());
3227 // Don't mark an extern_weak function DSO local on Windows.
3228 if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
3229 Init->setDSOLocal(Var->isDSOLocal());
3232 llvm::LLVMContext &Context = CGM.getModule().getContext();
3234 // The linker on AIX is not happy with missing weak symbols. However,
3235 // other TUs will not know whether the initialization routine exists,
3236 // so create an empty init function to satisfy the linker.
3237 // This is needed whenever a thread wrapper function is not used, and
3238 // also when the symbol is weak.
3239 if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
3240 isEmittedWithConstantInitializer(VD, true) &&
3241 !mayNeedDestruction(VD)) {
3242 // Init should be null. If it were non-null, then the logic above would
3243 // either be defining the function to be an alias or declaring the
3244 // function with the expectation that the definition of the variable
3245 // is elsewhere.
3246 assert(Init == nullptr && "Expected Init to be null.");
3248 llvm::Function *Func = llvm::Function::Create(
3249 InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
3250 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
3251 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
3252 cast<llvm::Function>(Func),
3253 /*IsThunk=*/false);
3254 // Create a function body that just returns.
3255 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
3256 CGBuilderTy Builder(CGM, Entry);
3257 Builder.CreateRetVoid();
3260 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
3261 CGBuilderTy Builder(CGM, Entry);
3262 if (HasConstantInitialization) {
3263 // No dynamic initialization to invoke.
3264 } else if (InitIsInitFunc) {
3265 if (Init) {
3266 llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
3267 if (isThreadWrapperReplaceable(VD, CGM)) {
3268 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3269 llvm::Function *Fn =
3270 cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
3271 Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3274 } else if (CGM.getTriple().isOSAIX()) {
3275 // On AIX, unless the variable is constinit and is neither of class type
3276 // nor of (possibly multi-dimensional) array of class type, thread_local
3277 // vars will have init routines regardless of whether they are
3278 // const-initialized. Since the routine is guaranteed to exist, we can
3279 // unconditionally call it without testing for its existence. This
3280 // avoids potentially unresolved weak symbols which the AIX linker
3281 // isn't happy with.
3282 Builder.CreateCall(InitFnTy, Init);
3283 } else {
3284 // Don't know whether we have an init function. Call it if it exists.
3285 llvm::Value *Have = Builder.CreateIsNotNull(Init);
3286 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3287 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3288 Builder.CreateCondBr(Have, InitBB, ExitBB);
3290 Builder.SetInsertPoint(InitBB);
3291 Builder.CreateCall(InitFnTy, Init);
3292 Builder.CreateBr(ExitBB);
3294 Builder.SetInsertPoint(ExitBB);
3297 // For a reference, the result of the wrapper function is a pointer to
3298 // the referenced object.
3299 llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);
3301 if (VD->getType()->isReferenceType()) {
3302 CharUnits Align = CGM.getContext().getDeclAlign(VD);
3303 Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
3306 Builder.CreateRet(Val);
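// A sketch of the wrapper emitted above for a hypothetical
// `thread_local int tls_var;` whose defining TU is unknown (the general
// ELF case):
//
//   int *_ZTW7tls_var() {   // thread wrapper
//     if (&_ZTH7tls_var)    // init function may not exist anywhere
//       _ZTH7tls_var();
//     return &tls_var;
//   }
//
// When this TU defines the variable the null test is dropped, and for a
// constant-initialized variable the init call disappears entirely.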
3310 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3311 const VarDecl *VD,
3312 QualType LValType) {
3313 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3314 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3316 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3317 CallVal->setCallingConv(Wrapper->getCallingConv());
3319 LValue LV;
3320 if (VD->getType()->isReferenceType())
3321 LV = CGF.MakeNaturalAlignRawAddrLValue(CallVal, LValType);
3322 else
3323 LV = CGF.MakeRawAddrLValue(CallVal, LValType,
3324 CGF.getContext().getDeclAlign(VD));
3325 // FIXME: need setObjCGCLValueClass?
3326 return LV;
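// So a use such as `tls_var = 42;` (hypothetical) is lowered roughly to
// `*_ZTW7tls_var() = 42;` instead of referencing the TLS symbol directly.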
3329 /// Return whether the given global decl needs a VTT parameter, which it does
3330 /// if it's a base constructor or destructor with virtual bases.
3331 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3332 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3334 // If we don't have any virtual bases, just return early.
3335 if (!MD->getParent()->getNumVBases())
3336 return false;
3338 // Check if we have a base constructor.
3339 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3340 return true;
3342 // Check if we have a base destructor.
3343 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3344 return true;
3346 return false;
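// Example (hypothetical types):
//
//   struct V {};
//   struct A : virtual V { A(); ~A(); };
//
// The base-object variants of A's constructor and destructor (C2/D2) take
// a hidden VTT parameter after `this`; the complete-object variants
// (C1/D1) do not.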
3349 llvm::Constant *
3350 ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) {
3351 SmallString<256> MethodName;
3352 llvm::raw_svector_ostream Out(MethodName);
3353 getMangleContext().mangleCXXName(MD, Out);
3354 MethodName += "_vfpthunk_";
3355 StringRef ThunkName = MethodName.str();
3356 llvm::Function *ThunkFn;
3357 if ((ThunkFn = cast_or_null<llvm::Function>(
3358 CGM.getModule().getNamedValue(ThunkName))))
3359 return ThunkFn;
3361 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD);
3362 llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
3363 llvm::GlobalValue::LinkageTypes Linkage =
3364 MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage
3365 : llvm::GlobalValue::InternalLinkage;
3366 ThunkFn =
3367 llvm::Function::Create(ThunkTy, Linkage, ThunkName, &CGM.getModule());
3368 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3369 ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
3370 assert(ThunkFn->getName() == ThunkName && "name was uniqued!");
3372 CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn, /*IsThunk=*/true);
3373 CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn);
3375 // Stack protection sometimes gets inserted after the musttail call.
3376 ThunkFn->removeFnAttr(llvm::Attribute::StackProtect);
3377 ThunkFn->removeFnAttr(llvm::Attribute::StackProtectStrong);
3378 ThunkFn->removeFnAttr(llvm::Attribute::StackProtectReq);
3380 // Start codegen.
3381 CodeGenFunction CGF(CGM);
3382 CGF.CurGD = GlobalDecl(MD);
3383 CGF.CurFuncIsThunk = true;
3385 // Build FunctionArgs.
3386 FunctionArgList FunctionArgs;
3387 CGF.BuildFunctionArgList(CGF.CurGD, FunctionArgs);
3389 CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
3390 FunctionArgs, MD->getLocation(), SourceLocation());
3391 llvm::Value *ThisVal = loadIncomingCXXThis(CGF);
3392 setCXXABIThisValue(CGF, ThisVal);
3394 CallArgList CallArgs;
3395 for (const VarDecl *VD : FunctionArgs)
3396 CGF.EmitDelegateCallArg(CallArgs, VD, SourceLocation());
3398 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
3399 RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, /*this*/ 1);
3400 const CGFunctionInfo &CallInfo =
3401 CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT, Required, 0);
3402 CGCallee Callee = CGCallee::forVirtual(nullptr, GlobalDecl(MD),
3403 getThisAddress(CGF), ThunkTy);
3404 llvm::CallBase *CallOrInvoke;
3405 CGF.EmitCall(CallInfo, Callee, ReturnValueSlot(), CallArgs, &CallOrInvoke,
3406 /*IsMustTail=*/true, SourceLocation(), true);
3407 auto *Call = cast<llvm::CallInst>(CallOrInvoke);
3408 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
3409 if (Call->getType()->isVoidTy())
3410 CGF.Builder.CreateRetVoid();
3411 else
3412 CGF.Builder.CreateRet(Call);
3414 // Finish the function to maintain CodeGenFunction invariants.
3415 // FIXME: Don't emit unreachable code.
3416 CGF.EmitBlock(CGF.createBasicBlock());
3417 CGF.FinishFunction();
3418 return ThunkFn;
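// A sketch (hypothetical types): for `struct S { virtual void foo(); };`,
// taking `&S::foo` under this scheme yields a pointer to a thunk roughly
// like
//
//   void _ZN1S3fooEv_vfpthunk_(S *self) {
//     [[clang::musttail]] return self->foo();  // virtual dispatch
//   }
//
// so the member function pointer itself never encodes a vtable offset.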
3421 namespace {
3422 class ItaniumRTTIBuilder {
3423 CodeGenModule &CGM; // Per-module state.
3424 llvm::LLVMContext &VMContext;
3425 const ItaniumCXXABI &CXXABI; // Per-module state.
3427 /// Fields - The fields of the RTTI descriptor currently being built.
3428 SmallVector<llvm::Constant *, 16> Fields;
3430 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3431 llvm::GlobalVariable *
3432 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3434 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3435 /// descriptor of the given type.
3436 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3438 /// BuildVTablePointer - Build the vtable pointer for the given type.
3439 void BuildVTablePointer(const Type *Ty, llvm::Constant *StorageAddress);
3441 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3442 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3443 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3445 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3446 /// classes with bases that do not satisfy the abi::__si_class_type_info
3447 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3448 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3450 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3451 /// for pointer types.
3452 void BuildPointerTypeInfo(QualType PointeeTy);
3454 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3455 /// type_info for an object type.
3456 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3458 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3459 /// struct, used for member pointer types.
3460 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3462 public:
3463 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3464 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3466 // Pointer type info flags.
3467 enum {
3468 /// PTI_Const - Type has const qualifier.
3469 PTI_Const = 0x1,
3471 /// PTI_Volatile - Type has volatile qualifier.
3472 PTI_Volatile = 0x2,
3474 /// PTI_Restrict - Type has restrict qualifier.
3475 PTI_Restrict = 0x4,
3477 /// PTI_Incomplete - Type is incomplete.
3478 PTI_Incomplete = 0x8,
3480 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3481 /// (in pointer to member).
3482 PTI_ContainingClassIncomplete = 0x10,
3484 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3485 //PTI_TransactionSafe = 0x20,
3487 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3488 PTI_Noexcept = 0x40,
3491 // VMI type info flags.
3492 enum {
3493 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3494 VMI_NonDiamondRepeat = 0x1,
3496 /// VMI_DiamondShaped - Class is diamond shaped.
3497 VMI_DiamondShaped = 0x2
3500 // Base class type info flags.
3501 enum {
3502 /// BCTI_Virtual - Base class is virtual.
3503 BCTI_Virtual = 0x1,
3505 /// BCTI_Public - Base class is public.
3506 BCTI_Public = 0x2
3509 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3510 /// link to an existing RTTI descriptor if one already exists.
3511 llvm::Constant *BuildTypeInfo(QualType Ty);
3513 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3514 llvm::Constant *BuildTypeInfo(
3515 QualType Ty,
3516 llvm::GlobalVariable::LinkageTypes Linkage,
3517 llvm::GlobalValue::VisibilityTypes Visibility,
3518 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
3522 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3523 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3524 SmallString<256> Name;
3525 llvm::raw_svector_ostream Out(Name);
3526 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3528 // We know that the mangled name of the type starts at index 4 of the
3529 // mangled name of the typename, so we can just index into it in order to
3530 // get the mangled name of the type.
3531 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3532 Name.substr(4));
3533 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3535 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3536 Name, Init->getType(), Linkage, Align.getAsAlign());
3538 GV->setInitializer(Init);
3540 return GV;
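// Example: for a hypothetical `struct A`, mangleCXXRTTIName produces
// "_ZTS1A"; substr(4) skips the "_ZTS" prefix, so the _ZTS1A variable is
// initialized with the bare mangled type name "1A".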
3543 llvm::Constant *
3544 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3545 // Mangle the RTTI name.
3546 SmallString<256> Name;
3547 llvm::raw_svector_ostream Out(Name);
3548 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3550 // Look for an existing global.
3551 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3553 if (!GV) {
3554 // Create a new global variable.
3555 // Note for the future: If we ever want to do deferred emission of RTTI,
3556 // check whether emitting vtables opportunistically needs any adjustment.
3558 GV = new llvm::GlobalVariable(
3559 CGM.getModule(), CGM.GlobalsInt8PtrTy,
3560 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
3561 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3562 CGM.setGVProperties(GV, RD);
3563 // Import the typeinfo symbol when all non-inline virtual methods are
3564 // imported.
3565 if (CGM.getTarget().hasPS4DLLImportExport()) {
3566 if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) {
3567 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3568 CGM.setDSOLocal(GV);
3573 return GV;
3576 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3577 /// info for that type is defined in the standard library.
3578 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3579 // Itanium C++ ABI 2.9.2:
3580 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3581 // the run-time support library. Specifically, the run-time support
3582 // library should contain type_info objects for the types X, X* and
3583 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3584 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3585 // long, unsigned long, long long, unsigned long long, float, double,
3586 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3587 // half-precision floating point types.
3589 // GCC also emits RTTI for __int128.
3590 // FIXME: We do not emit RTTI information for decimal types here.
3592 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3593 switch (Ty->getKind()) {
3594 case BuiltinType::Void:
3595 case BuiltinType::NullPtr:
3596 case BuiltinType::Bool:
3597 case BuiltinType::WChar_S:
3598 case BuiltinType::WChar_U:
3599 case BuiltinType::Char_U:
3600 case BuiltinType::Char_S:
3601 case BuiltinType::UChar:
3602 case BuiltinType::SChar:
3603 case BuiltinType::Short:
3604 case BuiltinType::UShort:
3605 case BuiltinType::Int:
3606 case BuiltinType::UInt:
3607 case BuiltinType::Long:
3608 case BuiltinType::ULong:
3609 case BuiltinType::LongLong:
3610 case BuiltinType::ULongLong:
3611 case BuiltinType::Half:
3612 case BuiltinType::Float:
3613 case BuiltinType::Double:
3614 case BuiltinType::LongDouble:
3615 case BuiltinType::Float16:
3616 case BuiltinType::Float128:
3617 case BuiltinType::Ibm128:
3618 case BuiltinType::Char8:
3619 case BuiltinType::Char16:
3620 case BuiltinType::Char32:
3621 case BuiltinType::Int128:
3622 case BuiltinType::UInt128:
3623 return true;
3625 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3626 case BuiltinType::Id:
3627 #include "clang/Basic/OpenCLImageTypes.def"
3628 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3629 case BuiltinType::Id:
3630 #include "clang/Basic/OpenCLExtensionTypes.def"
3631 case BuiltinType::OCLSampler:
3632 case BuiltinType::OCLEvent:
3633 case BuiltinType::OCLClkEvent:
3634 case BuiltinType::OCLQueue:
3635 case BuiltinType::OCLReserveID:
3636 #define SVE_TYPE(Name, Id, SingletonId) \
3637 case BuiltinType::Id:
3638 #include "clang/Basic/AArch64SVEACLETypes.def"
3639 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3640 case BuiltinType::Id:
3641 #include "clang/Basic/PPCTypes.def"
3642 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3643 #include "clang/Basic/RISCVVTypes.def"
3644 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3645 #include "clang/Basic/WebAssemblyReferenceTypes.def"
3646 #define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
3647 #include "clang/Basic/AMDGPUTypes.def"
3648 #define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3649 #include "clang/Basic/HLSLIntangibleTypes.def"
3650 case BuiltinType::ShortAccum:
3651 case BuiltinType::Accum:
3652 case BuiltinType::LongAccum:
3653 case BuiltinType::UShortAccum:
3654 case BuiltinType::UAccum:
3655 case BuiltinType::ULongAccum:
3656 case BuiltinType::ShortFract:
3657 case BuiltinType::Fract:
3658 case BuiltinType::LongFract:
3659 case BuiltinType::UShortFract:
3660 case BuiltinType::UFract:
3661 case BuiltinType::ULongFract:
3662 case BuiltinType::SatShortAccum:
3663 case BuiltinType::SatAccum:
3664 case BuiltinType::SatLongAccum:
3665 case BuiltinType::SatUShortAccum:
3666 case BuiltinType::SatUAccum:
3667 case BuiltinType::SatULongAccum:
3668 case BuiltinType::SatShortFract:
3669 case BuiltinType::SatFract:
3670 case BuiltinType::SatLongFract:
3671 case BuiltinType::SatUShortFract:
3672 case BuiltinType::SatUFract:
3673 case BuiltinType::SatULongFract:
3674 case BuiltinType::BFloat16:
3675 return false;
3677 case BuiltinType::Dependent:
3678 #define BUILTIN_TYPE(Id, SingletonId)
3679 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3680 case BuiltinType::Id:
3681 #include "clang/AST/BuiltinTypes.def"
3682 llvm_unreachable("asking for RRTI for a placeholder type!");
3684 case BuiltinType::ObjCId:
3685 case BuiltinType::ObjCClass:
3686 case BuiltinType::ObjCSel:
3687 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3690 llvm_unreachable("Invalid BuiltinType Kind!");
3693 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3694 QualType PointeeTy = PointerTy->getPointeeType();
3695 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3696 if (!BuiltinTy)
3697 return false;
3699 // Check the qualifiers.
3700 Qualifiers Quals = PointeeTy.getQualifiers();
3701 Quals.removeConst();
3703 if (!Quals.empty())
3704 return false;
3706 return TypeInfoIsInStandardLibrary(BuiltinTy);
3709 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3710 /// information for the given type exists in the standard library.
3711 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3712 // Type info for builtin types is defined in the standard library.
3713 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3714 return TypeInfoIsInStandardLibrary(BuiltinTy);
3716 // Type info for some pointer types to builtin types is defined in the
3717 // standard library.
3718 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3719 return TypeInfoIsInStandardLibrary(PointerTy);
3721 return false;
3724 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3725 /// the given type exists somewhere else, meaning we should not emit the type
3726 /// information in this translation unit. Assumes that it is not a
3727 /// standard-library type.
3728 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3729 QualType Ty) {
3730 ASTContext &Context = CGM.getContext();
3732 // If RTTI is disabled, assume it might be disabled in the
3733 // translation unit that defines any potential key function, too.
3734 if (!Context.getLangOpts().RTTI) return false;
3736 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3737 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3738 if (!RD->hasDefinition())
3739 return false;
3741 if (!RD->isDynamicClass())
3742 return false;
3744 // FIXME: this may need to be reconsidered if the key function
3745 // changes.
3746 // N.B. We must always emit the RTTI data ourselves if there exists a key
3747 // function.
3748 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3750 // Don't import the RTTI but emit it locally.
3751 if (CGM.getTriple().isWindowsGNUEnvironment())
3752 return false;
3754 if (CGM.getVTables().isVTableExternal(RD)) {
3755 if (CGM.getTarget().hasPS4DLLImportExport())
3756 return true;
3758 return !IsDLLImport || CGM.getTriple().isWindowsItaniumEnvironment();
3762 if (IsDLLImport)
3763 return true;
3766 return false;
3769 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3770 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3771 return !RecordTy->getDecl()->isCompleteDefinition();
3774 /// ContainsIncompleteClassType - Returns whether the given type contains an
3775 /// incomplete class type. This is true if
3777 /// * The given type is an incomplete class type.
3778 /// * The given type is a pointer type whose pointee type contains an
3779 /// incomplete class type.
3780 /// * The given type is a member pointer type whose class is an incomplete
3781 /// class type.
3782 /// * The given type is a member pointer type whose pointee type contains an
3783 /// incomplete class type.
3785 static bool ContainsIncompleteClassType(QualType Ty) {
3786 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3787 if (IsIncompleteClassType(RecordTy))
3788 return true;
3791 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3792 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3794 if (const MemberPointerType *MemberPointerTy =
3795 dyn_cast<MemberPointerType>(Ty)) {
3796 // Check if the class type is incomplete.
3797 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3798 if (IsIncompleteClassType(ClassType))
3799 return true;
3801 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3804 return false;
3807 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3808 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3809 // iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
3810 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3811 // Check the number of bases.
3812 if (RD->getNumBases() != 1)
3813 return false;
3815 // Get the base.
3816 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3818 // Check that the base is not virtual.
3819 if (Base->isVirtual())
3820 return false;
3822 // Check that the base is public.
3823 if (Base->getAccessSpecifier() != AS_public)
3824 return false;
3826 // Check that the class is dynamic iff the base is.
3827 auto *BaseDecl =
3828 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3829 if (!BaseDecl->isEmpty() &&
3830 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3831 return false;
3833 return true;
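// Example (hypothetical types):
//
//   struct B { virtual ~B(); };
//   struct D1 : B {};          // OK: single public non-virtual base,
//                              //     base and derived both dynamic
//   struct D2 : private B {};  // no: base is not public
//   struct D3 : virtual B {};  // no: base is virtual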
3836 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty,
3837 llvm::Constant *StorageAddress) {
3838 // abi::__class_type_info.
3839 static const char * const ClassTypeInfo =
3840 "_ZTVN10__cxxabiv117__class_type_infoE";
3841 // abi::__si_class_type_info.
3842 static const char * const SIClassTypeInfo =
3843 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3844 // abi::__vmi_class_type_info.
3845 static const char * const VMIClassTypeInfo =
3846 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3848 const char *VTableName = nullptr;
3850 switch (Ty->getTypeClass()) {
3851 #define TYPE(Class, Base)
3852 #define ABSTRACT_TYPE(Class, Base)
3853 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3854 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3855 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3856 #include "clang/AST/TypeNodes.inc"
3857 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3859 case Type::LValueReference:
3860 case Type::RValueReference:
3861 llvm_unreachable("References shouldn't get here");
3863 case Type::Auto:
3864 case Type::DeducedTemplateSpecialization:
3865 llvm_unreachable("Undeduced type shouldn't get here");
3867 case Type::Pipe:
3868 llvm_unreachable("Pipe types shouldn't get here");
3870 case Type::ArrayParameter:
3871 llvm_unreachable("Array Parameter types should not get here.");
3873 case Type::Builtin:
3874 case Type::BitInt:
3875 // GCC treats vector and complex types as fundamental types.
3876 case Type::Vector:
3877 case Type::ExtVector:
3878 case Type::ConstantMatrix:
3879 case Type::Complex:
3880 case Type::Atomic:
3881 // FIXME: GCC treats block pointers as fundamental types?!
3882 case Type::BlockPointer:
3883 // abi::__fundamental_type_info.
3884 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3885 break;
3887 case Type::ConstantArray:
3888 case Type::IncompleteArray:
3889 case Type::VariableArray:
3890 // abi::__array_type_info.
3891 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3892 break;
3894 case Type::FunctionNoProto:
3895 case Type::FunctionProto:
3896 // abi::__function_type_info.
3897 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3898 break;
3900 case Type::Enum:
3901 // abi::__enum_type_info.
3902 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3903 break;
3905 case Type::Record: {
3906 const CXXRecordDecl *RD =
3907 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3909 if (!RD->hasDefinition() || !RD->getNumBases()) {
3910 VTableName = ClassTypeInfo;
3911 } else if (CanUseSingleInheritance(RD)) {
3912 VTableName = SIClassTypeInfo;
3913 } else {
3914 VTableName = VMIClassTypeInfo;
3917 break;
3920 case Type::ObjCObject:
3921 // Ignore protocol qualifiers.
3922 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3924 // Handle id and Class.
3925 if (isa<BuiltinType>(Ty)) {
3926 VTableName = ClassTypeInfo;
3927 break;
3930 assert(isa<ObjCInterfaceType>(Ty));
3931 [[fallthrough]];
3933 case Type::ObjCInterface:
3934 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3935 VTableName = SIClassTypeInfo;
3936 } else {
3937 VTableName = ClassTypeInfo;
3939 break;
3941 case Type::ObjCObjectPointer:
3942 case Type::Pointer:
3943 // abi::__pointer_type_info.
3944 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3945 break;
3947 case Type::MemberPointer:
3948 // abi::__pointer_to_member_type_info.
3949 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3950 break;
3952 case Type::HLSLAttributedResource:
3953 llvm_unreachable("HLSL doesn't support virtual functions");
3956 llvm::Constant *VTable = nullptr;
3958 // Check if the alias exists. If it doesn't, then get or create the global.
3959 if (CGM.getItaniumVTableContext().isRelativeLayout())
3960 VTable = CGM.getModule().getNamedAlias(VTableName);
3961 if (!VTable) {
3962 llvm::Type *Ty = llvm::ArrayType::get(CGM.GlobalsInt8PtrTy, 0);
3963 VTable = CGM.getModule().getOrInsertGlobal(VTableName, Ty);
3966 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3968 llvm::Type *PtrDiffTy =
3969 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3971 // In the classic layout, the vtable address point is at index 2.
3972 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3973 // The vtable address point is 8 bytes after its start:
3974 // 4 for the offset to top + 4 for the relative offset to rtti.
3975 llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3976 VTable =
3977 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3978 } else {
3979 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3980 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
3981 VTable, Two);
3984 if (const auto &Schema =
3985 CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
3986 VTable = CGM.getConstantSignedPointer(
3987 VTable, Schema,
3988 Schema.isAddressDiscriminated() ? StorageAddress : nullptr,
3989 GlobalDecl(), QualType(Ty, 0));
3991 Fields.push_back(VTable);
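// In the classic layout the two entries skipped here are the offset-to-top
// and RTTI slots, so the address point is &vtable[2]:
//
//   [ offset-to-top ][ RTTI ][ vfn0 ][ vfn1 ] ...
//                            ^ address point
//
// In the relative layout the same two fields are i32s, hence the 8-byte
// offset computed above.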
3994 /// Return the linkage that the type info and type info name constants
3995 /// should have for the given type.
3996 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3997 QualType Ty) {
3998 // Itanium C++ ABI 2.9.5p7:
3999 // In addition, it and all of the intermediate abi::__pointer_type_info
4000 // structs in the chain down to the abi::__class_type_info for the
4001 // incomplete class type must be prevented from resolving to the
4002 // corresponding type_info structs for the complete class type, possibly
4003 // by making them local static objects. Finally, a dummy class RTTI is
4004 // generated for the incomplete type that will not resolve to the final
4005 // complete class RTTI (because the latter need not exist), possibly by
4006 // making it a local static object.
4007 if (ContainsIncompleteClassType(Ty))
4008 return llvm::GlobalValue::InternalLinkage;
4010 switch (Ty->getLinkage()) {
4011 case Linkage::Invalid:
4012 llvm_unreachable("Linkage hasn't been computed!");
4014 case Linkage::None:
4015 case Linkage::Internal:
4016 case Linkage::UniqueExternal:
4017 return llvm::GlobalValue::InternalLinkage;
4019 case Linkage::VisibleNone:
4020 case Linkage::Module:
4021 case Linkage::External:
4022 // RTTI is not enabled, which means that this type info struct is going
4023 // to be used for exception handling. Give it linkonce_odr linkage.
4024 if (!CGM.getLangOpts().RTTI)
4025 return llvm::GlobalValue::LinkOnceODRLinkage;
4027 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
4028 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
4029 if (RD->hasAttr<WeakAttr>())
4030 return llvm::GlobalValue::WeakODRLinkage;
4031 if (CGM.getTriple().isWindowsItaniumEnvironment())
4032 if (RD->hasAttr<DLLImportAttr>() &&
4033 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4034 return llvm::GlobalValue::ExternalLinkage;
4035 // MinGW always uses LinkOnceODRLinkage for type info.
4036 if (RD->isDynamicClass() &&
4037 !CGM.getContext()
4038 .getTargetInfo()
4039 .getTriple()
4040 .isWindowsGNUEnvironment())
4041 return CGM.getVTableLinkage(RD);
4044 return llvm::GlobalValue::LinkOnceODRLinkage;
4047 llvm_unreachable("Invalid linkage!");
4050 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
4051 // We want to operate on the canonical type.
4052 Ty = Ty.getCanonicalType();
4054 // Check if we've already emitted an RTTI descriptor for this type.
4055 SmallString<256> Name;
4056 llvm::raw_svector_ostream Out(Name);
4057 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
4059 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
4060 if (OldGV && !OldGV->isDeclaration()) {
4061 assert(!OldGV->hasAvailableExternallyLinkage() &&
4062 "available_externally typeinfos not yet implemented");
4064 return OldGV;
4067 // Check if there is already an external RTTI descriptor for this type.
4068 if (IsStandardLibraryRTTIDescriptor(Ty) ||
4069 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4070 return GetAddrOfExternalRTTIDescriptor(Ty);
4072 // Compute the linkage with which to emit this type's RTTI descriptor.
4073 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
4075 // Give the type_info object and name the formal visibility of the
4076 // type itself.
4077 llvm::GlobalValue::VisibilityTypes llvmVisibility;
4078 if (llvm::GlobalValue::isLocalLinkage(Linkage))
4079 // If the linkage is local, only default visibility makes sense.
4080 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
4081 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
4082 ItaniumCXXABI::RUK_NonUniqueHidden)
4083 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
4084 else
4085 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
4087 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4088 llvm::GlobalValue::DefaultStorageClass;
4089 if (auto RD = Ty->getAsCXXRecordDecl()) {
4090 if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
4091 RD->hasAttr<DLLExportAttr>()) ||
4092 (CGM.shouldMapVisibilityToDLLExport(RD) &&
4093 !llvm::GlobalValue::isLocalLinkage(Linkage) &&
4094 llvmVisibility == llvm::GlobalValue::DefaultVisibility))
4095 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
4097 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
4100 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
4101 QualType Ty,
4102 llvm::GlobalVariable::LinkageTypes Linkage,
4103 llvm::GlobalValue::VisibilityTypes Visibility,
4104 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
4105 SmallString<256> Name;
4106 llvm::raw_svector_ostream Out(Name);
4107 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
4108 llvm::Module &M = CGM.getModule();
4109 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
4110 // Int8Ty is an arbitrary placeholder type, replaced later via replaceInitializer.
4111 llvm::GlobalVariable *GV =
4112 new llvm::GlobalVariable(M, CGM.Int8Ty, /*isConstant=*/true, Linkage,
4113 /*Initializer=*/nullptr, Name);
4115 // Add the vtable pointer.
4116 BuildVTablePointer(cast<Type>(Ty), GV);
4118 // And the name.
4119 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
4120 llvm::Constant *TypeNameField;
4122 // If we're supposed to demote the visibility, be sure to set a flag
4123 // to use a string comparison for type_info comparisons.
4124 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
4125 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
4126 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
4127 // The flag is the sign bit, which on ARM64 is defined to be clear
4128 // for global pointers. This is very ARM64-specific.
4129 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
4130 llvm::Constant *flag =
4131 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
4132 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
4133 TypeNameField =
4134 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy);
4135 } else {
4136 TypeNameField = TypeName;
4138 Fields.push_back(TypeNameField);
4140 switch (Ty->getTypeClass()) {
4141 #define TYPE(Class, Base)
4142 #define ABSTRACT_TYPE(Class, Base)
4143 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
4144 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
4145 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
4146 #include "clang/AST/TypeNodes.inc"
4147 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
4149 // GCC treats vector types as fundamental types.
4150 case Type::Builtin:
4151 case Type::Vector:
4152 case Type::ExtVector:
4153 case Type::ConstantMatrix:
4154 case Type::Complex:
4155 case Type::BlockPointer:
4156 // Itanium C++ ABI 2.9.5p4:
4157 // abi::__fundamental_type_info adds no data members to std::type_info.
4158 break;
4160 case Type::LValueReference:
4161 case Type::RValueReference:
4162 llvm_unreachable("References shouldn't get here");
4164 case Type::Auto:
4165 case Type::DeducedTemplateSpecialization:
4166 llvm_unreachable("Undeduced type shouldn't get here");
4168 case Type::Pipe:
4169 break;
4171 case Type::BitInt:
4172 break;
4174 case Type::ConstantArray:
4175 case Type::IncompleteArray:
4176 case Type::VariableArray:
4177 case Type::ArrayParameter:
4178 // Itanium C++ ABI 2.9.5p5:
4179 // abi::__array_type_info adds no data members to std::type_info.
4180 break;
4182 case Type::FunctionNoProto:
4183 case Type::FunctionProto:
4184 // Itanium C++ ABI 2.9.5p5:
4185 // abi::__function_type_info adds no data members to std::type_info.
4186 break;
4188 case Type::Enum:
4189 // Itanium C++ ABI 2.9.5p5:
4190 // abi::__enum_type_info adds no data members to std::type_info.
4191 break;
4193 case Type::Record: {
4194 const CXXRecordDecl *RD =
4195 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
4196 if (!RD->hasDefinition() || !RD->getNumBases()) {
4197 // We don't need to emit any fields.
4198 break;
4201 if (CanUseSingleInheritance(RD))
4202 BuildSIClassTypeInfo(RD);
4203 else
4204 BuildVMIClassTypeInfo(RD);
4206 break;
4209 case Type::ObjCObject:
4210 case Type::ObjCInterface:
4211 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
4212 break;
4214 case Type::ObjCObjectPointer:
4215 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
4216 break;
4218 case Type::Pointer:
4219 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
4220 break;
4222 case Type::MemberPointer:
4223 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
4224 break;
4226 case Type::Atomic:
4227 // No fields, at least for the moment.
4228 break;
4230 case Type::HLSLAttributedResource:
4231 llvm_unreachable("HLSL doesn't support RTTI");
4234 GV->replaceInitializer(llvm::ConstantStruct::getAnon(Fields));
4236 // Export the typeinfo in the same circumstances as the vtable is exported.
4237 auto GVDLLStorageClass = DLLStorageClass;
4238 if (CGM.getTarget().hasPS4DLLImportExport() &&
4239 GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
4240 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
4241 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
4242 if (RD->hasAttr<DLLExportAttr>() ||
4243 CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
4244 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
4248 // If there's already an old global variable, replace it with the new one.
4249 if (OldGV) {
4250 GV->takeName(OldGV);
4251 OldGV->replaceAllUsesWith(GV);
4252 OldGV->eraseFromParent();
4255 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
4256 GV->setComdat(M.getOrInsertComdat(GV->getName()));
4258 CharUnits Align = CGM.getContext().toCharUnitsFromBits(
4259 CGM.getTarget().getPointerAlign(CGM.GetGlobalVarAddressSpace(nullptr)));
4260 GV->setAlignment(Align.getAsAlign());
4262 // The Itanium ABI specifies that type_info objects must be globally
4263 // unique, with one exception: if the type is an incomplete class
4264 // type or a (possibly indirect) pointer to one. That exception
4265 // affects the general case of comparing type_info objects produced
4266 // by the typeid operator, which is why the comparison operators on
4267 // std::type_info generally use the type_info name pointers instead
4268 // of the object addresses. However, the language's built-in uses
4269 // of RTTI generally require class types to be complete, even when
4270 // manipulating pointers to those class types. This allows the
4271 // implementation of dynamic_cast to rely on address equality tests,
4272 // which is much faster.
4274 // All of this is to say that it's important that both the type_info
4275 // object and the type_info name be uniqued when weakly emitted.
4277 TypeName->setVisibility(Visibility);
4278 CGM.setDSOLocal(TypeName);
4280 GV->setVisibility(Visibility);
4281 CGM.setDSOLocal(GV);
4283 TypeName->setDLLStorageClass(DLLStorageClass);
4284 GV->setDLLStorageClass(GVDLLStorageClass);
4286 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
4287 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
4289 return GV;
4292 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
4293 /// for the given Objective-C object type.
4294 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
4295 // Drop qualifiers.
4296 const Type *T = OT->getBaseType().getTypePtr();
4297 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
4299 // The builtin types are abi::__class_type_infos and don't require
4300 // extra fields.
4301 if (isa<BuiltinType>(T)) return;
4303 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
4304 ObjCInterfaceDecl *Super = Class->getSuperClass();
4306 // Root classes are also __class_type_info.
4307 if (!Super) return;
4309 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
4311 // Everything else is single inheritance.
4312 llvm::Constant *BaseTypeInfo =
4313 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
4314 Fields.push_back(BaseTypeInfo);
4317 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
4318 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
4319 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
4320 // Itanium C++ ABI 2.9.5p6b:
4321 // It adds to abi::__class_type_info a single member pointing to the
4322 // type_info structure for the base type,
4323 llvm::Constant *BaseTypeInfo =
4324 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
4325 Fields.push_back(BaseTypeInfo);
4328 namespace {
4329 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
4330 /// a class hierarchy.
4331 struct SeenBases {
4332 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
4333 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
4337 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
4338 /// abi::__vmi_class_type_info.
4340 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
4341 SeenBases &Bases) {
4343 unsigned Flags = 0;
4345 auto *BaseDecl =
4346 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
4348 if (Base->isVirtual()) {
4349 // Mark the virtual base as seen.
4350 if (!Bases.VirtualBases.insert(BaseDecl).second) {
4351 // If this virtual base has been seen before, then the class is diamond
4352 // shaped.
4353 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
4354 } else {
4355 if (Bases.NonVirtualBases.count(BaseDecl))
4356 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4358 } else {
4359 // Mark the non-virtual base as seen.
4360 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
4361 // If this non-virtual base has been seen before, then the class has non-
4362 // diamond shaped repeated inheritance.
4363 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4364 } else {
4365 if (Bases.VirtualBases.count(BaseDecl))
4366 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4370 // Walk all bases.
4371 for (const auto &I : BaseDecl->bases())
4372 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4374 return Flags;
4377 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4378 unsigned Flags = 0;
4379 SeenBases Bases;
4381 // Walk all bases.
4382 for (const auto &I : RD->bases())
4383 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4385 return Flags;
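// Example (hypothetical hierarchy):
//
//   struct A {};
//   struct B : virtual A {};
//   struct C : virtual A {};
//   struct D : B, C {};  // A is reached twice as a virtual base
//
// Computing the flags for D sets VMI_DiamondShaped; a base repeated
// non-virtually would set VMI_NonDiamondRepeat instead.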
4388 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4389 /// classes with bases that do not satisfy the abi::__si_class_type_info
4390 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
4391 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4392 llvm::Type *UnsignedIntLTy =
4393 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4395 // Itanium C++ ABI 2.9.5p6c:
4396 // __flags is a word with flags describing details about the class
4397 // structure, which may be referenced by using the __flags_masks
4398 // enumeration. These flags refer to both direct and indirect bases.
4399 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4400 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4402 // Itanium C++ ABI 2.9.5p6c:
4403 // __base_count is a word with the number of direct proper base class
4404 // descriptions that follow.
4405 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4407 if (!RD->getNumBases())
4408 return;
4410 // Now add the base class descriptions.
4412 // Itanium C++ ABI 2.9.5p6c:
4413 // __base_info[] is an array of base class descriptions -- one for every
4414 // direct proper base. Each description is of the type:
4416 // struct abi::__base_class_type_info {
4417 // public:
4418 // const __class_type_info *__base_type;
4419 // long __offset_flags;
4421 // enum __offset_flags_masks {
4422 // __virtual_mask = 0x1,
4423 // __public_mask = 0x2,
4424 // __offset_shift = 8
4425 // };
4426 // };
4428 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4429 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4430 // LLP64 platforms.
4431 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4432 // LLP64 platforms.
4433 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4434 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4435 if (TI.getTriple().isOSCygMing() &&
4436 TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
4437 OffsetFlagsTy = CGM.getContext().LongLongTy;
4438 llvm::Type *OffsetFlagsLTy =
4439 CGM.getTypes().ConvertType(OffsetFlagsTy);
4441 for (const auto &Base : RD->bases()) {
4442 // The __base_type member points to the RTTI for the base type.
4443 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4445 auto *BaseDecl =
4446 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4448 int64_t OffsetFlags = 0;
4450 // All but the lower 8 bits of __offset_flags are a signed offset.
4451 // For a non-virtual base, this is the offset in the object of the base
4452 // subobject. For a virtual base, this is the offset in the virtual table of
4453 // the virtual base offset for the virtual base referenced (negative).
4454 CharUnits Offset;
4455 if (Base.isVirtual())
4456 Offset =
4457 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4458 else {
4459 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4460 Offset = Layout.getBaseClassOffset(BaseDecl);
4461 }
4463 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4465 // The low-order byte of __offset_flags contains flags, as given by the
4466 // masks from the enumeration __offset_flags_masks.
4467 if (Base.isVirtual())
4468 OffsetFlags |= BCTI_Virtual;
4469 if (Base.getAccessSpecifier() == AS_public)
4470 OffsetFlags |= BCTI_Public;
4472 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4473 }
4474 }
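// A worked example of the packing above (illustrative, not part of this
// file): the helper below mirrors the shift-then-or sequence, using the
// __offset_flags_masks values quoted in the comment block earlier.
#include <cstdint>
namespace offset_flags_example {
constexpr int64_t encode(int64_t Offset, bool Virtual, bool Public) {
  int64_t Word = static_cast<int64_t>(static_cast<uint64_t>(Offset) << 8);
  if (Virtual)
    Word |= 0x1; // __virtual_mask
  if (Public)
    Word |= 0x2; // __public_mask
  return Word;
}
// A public non-virtual base at byte offset 16:
static_assert(encode(16, /*Virtual=*/false, /*Public=*/true) == 0x1002, "");
// For a virtual base, Offset is instead the (typically negative) position of
// the virtual-base-offset slot in the vtable, as computed above via
// getVirtualBaseOffsetOffset.
}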
4476 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4477 /// pieces from \p Type.
4478 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4479 unsigned Flags = 0;
4481 if (Type.isConstQualified())
4482 Flags |= ItaniumRTTIBuilder::PTI_Const;
4483 if (Type.isVolatileQualified())
4484 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4485 if (Type.isRestrictQualified())
4486 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4487 Type = Type.getUnqualifiedType();
4489 // Itanium C++ ABI 2.9.5p7:
4490 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4491 // incomplete class type, the incomplete target type flag is set.
4492 if (ContainsIncompleteClassType(Type))
4493 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4495 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4496 if (Proto->isNothrow()) {
4497 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4498 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4499 }
4500 }
4502 return Flags;
4503 }
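// An illustrative mapping (mask values taken from the ABI's
// abi::__pbase_type_info __masks enumeration, assumed here rather than
// quoted from this file): for a pointer type `const volatile int *`, the
// pointee handed to extractPBaseFlags is `const volatile int`, yielding:
namespace pbase_flags_example {
constexpr unsigned ConstMask = 0x1, VolatileMask = 0x2;
static_assert((ConstMask | VolatileMask) == 0x3,
              "__flags for `const volatile int *` is 0x3");
// ...while the __pointee field then refers to the type_info for plain `int`,
// since the qualifiers were stripped into __flags.
}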
4505 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4506 /// used for pointer types.
4507 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4508 // Itanium C++ ABI 2.9.5p7:
4509 // __flags is a flag word describing the cv-qualification and other
4510 // attributes of the type pointed to
4511 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4513 llvm::Type *UnsignedIntLTy =
4514 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4515 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4517 // Itanium C++ ABI 2.9.5p7:
4518 // __pointee is a pointer to the std::type_info derivation for the
4519 // unqualified type being pointed to.
4520 llvm::Constant *PointeeTypeInfo =
4521 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4522 Fields.push_back(PointeeTypeInfo);
4523 }
4525 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4526 /// struct, used for member pointer types.
4527 void
4528 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4529 QualType PointeeTy = Ty->getPointeeType();
4531 // Itanium C++ ABI 2.9.5p7:
4532 // __flags is a flag word describing the cv-qualification and other
4533 // attributes of the type pointed to.
4534 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4536 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4537 if (IsIncompleteClassType(ClassType))
4538 Flags |= PTI_ContainingClassIncomplete;
4540 llvm::Type *UnsignedIntLTy =
4541 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4542 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4544 // Itanium C++ ABI 2.9.5p7:
4545 // __pointee is a pointer to the std::type_info derivation for the
4546 // unqualified type being pointed to.
4547 llvm::Constant *PointeeTypeInfo =
4548 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4549 Fields.push_back(PointeeTypeInfo);
4551 // Itanium C++ ABI 2.9.5p9:
4552 // __context is a pointer to an abi::__class_type_info corresponding to the
4553 // class type containing the member pointed to
4554 // (e.g., the "A" in "int A::*").
4555 Fields.push_back(
4556 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4557 }
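// Illustrative field values for `int A::*` under the scheme above: __flags
// comes from the qualifiers of `int` (plus PTI_ContainingClassIncomplete if
// A is not yet defined), __pointee is the type_info for int, and __context
// is the type_info for A. The __context member is what keeps member-pointer
// RTTI distinct across classes (X and Y here are hypothetical):
#include <typeinfo>
namespace member_pointer_rtti_example {
struct X {};
struct Y {};
inline bool distinctContexts() {
  // Same __pointee (int) and same __flags, but different __context:
  return typeid(int X::*) != typeid(int Y::*); // true
}
}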
4559 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4560 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4561 }
4563 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4564 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4565 QualType FundamentalTypes[] = {
4566 getContext().VoidTy, getContext().NullPtrTy,
4567 getContext().BoolTy, getContext().WCharTy,
4568 getContext().CharTy, getContext().UnsignedCharTy,
4569 getContext().SignedCharTy, getContext().ShortTy,
4570 getContext().UnsignedShortTy, getContext().IntTy,
4571 getContext().UnsignedIntTy, getContext().LongTy,
4572 getContext().UnsignedLongTy, getContext().LongLongTy,
4573 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4574 getContext().UnsignedInt128Ty, getContext().HalfTy,
4575 getContext().FloatTy, getContext().DoubleTy,
4576 getContext().LongDoubleTy, getContext().Float128Ty,
4577 getContext().Char8Ty, getContext().Char16Ty,
4578 getContext().Char32Ty
4579 };
4580 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4581 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
4582 ? llvm::GlobalValue::DLLExportStorageClass
4583 : llvm::GlobalValue::DefaultStorageClass;
4584 llvm::GlobalValue::VisibilityTypes Visibility =
4585 CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4586 for (const QualType &FundamentalType : FundamentalTypes) {
4587 QualType PointerType = getContext().getPointerType(FundamentalType);
4588 QualType PointerTypeConst = getContext().getPointerType(
4589 FundamentalType.withConst());
4590 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4591 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4592 Type, llvm::GlobalValue::ExternalLinkage,
4593 Visibility, DLLStorageClass);
4594 }
4595 }
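// For each fundamental type T listed above, three descriptors are produced:
// T, T*, and const T*. For int, for example, that is (using the standard
// Itanium manglings) _ZTIi, _ZTIPi, and _ZTIPKi. Emitting them with external
// linkage lets a single definition, typically owned by the C++ runtime
// library that calls into this path, satisfy references from other TUs.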
4597 /// What sort of uniqueness rules should we use for the RTTI for the
4598 /// given type?
4599 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4600 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4601 if (shouldRTTIBeUnique())
4602 return RUK_Unique;
4604 // It's only necessary for linkonce_odr or weak_odr linkage.
4605 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4606 Linkage != llvm::GlobalValue::WeakODRLinkage)
4607 return RUK_Unique;
4609 // It's only necessary with default visibility.
4610 if (CanTy->getVisibility() != DefaultVisibility)
4611 return RUK_Unique;
4613 // If we're not required to publish this symbol, hide it.
4614 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4615 return RUK_NonUniqueHidden;
4617 // If we're required to publish this symbol, as we might be under an
4618 // explicit instantiation, leave it with default visibility but
4619 // enable string-comparisons.
4620 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4621 return RUK_NonUniqueVisible;
4622 }
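// A concrete (hypothetical) illustration of the classification above:
//
//   template <typename T> struct S {};
//   void use() { (void)typeid(S<int>); } // any TU may instantiate this
//
// 'typeinfo for S<int>' is emitted linkonce_odr with default visibility, so
// classifyRTTIUniqueness returns RUK_NonUniqueHidden: the symbol is hidden
// and the runtime must be prepared to match such type_infos by name rather
// than by address. Under an explicit instantiation definition the linkage is
// weak_odr instead, giving RUK_NonUniqueVisible.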
4624 // Find out how to codegen the complete destructor and constructor
4625 namespace {
4626 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4627 }
4628 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4629 const CXXMethodDecl *MD) {
4630 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4631 return StructorCodegen::Emit;
4633 // The complete and base structors are not equivalent if there are any virtual
4634 // bases, so emit separate functions.
4635 if (MD->getParent()->getNumVBases())
4636 return StructorCodegen::Emit;
4638 GlobalDecl AliasDecl;
4639 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4640 AliasDecl = GlobalDecl(DD, Dtor_Complete);
4641 } else {
4642 const auto *CD = cast<CXXConstructorDecl>(MD);
4643 AliasDecl = GlobalDecl(CD, Ctor_Complete);
4644 }
4645 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4647 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4648 return StructorCodegen::RAUW;
4650 // FIXME: Should we allow available_externally aliases?
4651 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4652 return StructorCodegen::RAUW;
4654 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4655 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4656 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4657 CGM.getTarget().getTriple().isOSBinFormatWasm())
4658 return StructorCodegen::COMDAT;
4659 return StructorCodegen::Emit;
4660 }
4662 return StructorCodegen::Alias;
4663 }
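// An assumed-IR sketch of the Alias case (illustrative; the exact linkage
// depends on the declaration): for
//
//   struct T { ~T(); };
//   T::~T() {}
//
// with -mconstructor-aliases on ELF, the complete destructor D1 can be a
// plain alias of the base destructor D2:
//
//   @_ZN1TD1Ev = unnamed_addr alias void (ptr), ptr @_ZN1TD2Ev
//
// whereas a class with virtual bases takes the early StructorCodegen::Emit
// return above, since its complete variant must additionally handle the
// virtual bases itself.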
4665 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4666 GlobalDecl AliasDecl,
4667 GlobalDecl TargetDecl) {
4668 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4670 StringRef MangledName = CGM.getMangledName(AliasDecl);
4671 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4672 if (Entry && !Entry->isDeclaration())
4673 return;
4675 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4677 // Create the alias with no name.
4678 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4680 // Constructors and destructors are always unnamed_addr.
4681 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4683 // Switch any previous uses to the alias.
4684 if (Entry) {
4685 assert(Entry->getType() == Aliasee->getType() &&
4686 "declaration exists with different type");
4687 Alias->takeName(Entry);
4688 Entry->replaceAllUsesWith(Alias);
4689 Entry->eraseFromParent();
4690 } else {
4691 Alias->setName(MangledName);
4692 }
4694 // Finally, set up the alias with its proper name and attributes.
4695 CGM.SetCommonAttributes(AliasDecl, Alias);
4696 }
4698 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4699 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4700 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4701 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4703 StructorCodegen CGType = getCodegenToUse(CGM, MD);
4705 if (CD ? GD.getCtorType() == Ctor_Complete
4706 : GD.getDtorType() == Dtor_Complete) {
4707 GlobalDecl BaseDecl;
4708 if (CD)
4709 BaseDecl = GD.getWithCtorType(Ctor_Base);
4710 else
4711 BaseDecl = GD.getWithDtorType(Dtor_Base);
4713 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4714 emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4715 return;
4716 }
4718 if (CGType == StructorCodegen::RAUW) {
4719 StringRef MangledName = CGM.getMangledName(GD);
4720 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4721 CGM.addReplacement(MangledName, Aliasee);
4722 return;
4723 }
4724 }
4726 // The base destructor is equivalent to the base destructor of its
4727 // base class if there is exactly one non-virtual base class with a
4728 // non-trivial destructor, there are no fields with a non-trivial
4729 // destructor, and the body of the destructor is trivial.
4730 if (DD && GD.getDtorType() == Dtor_Base &&
4731 CGType != StructorCodegen::COMDAT &&
4732 !CGM.TryEmitBaseDestructorAsAlias(DD))
4733 return;
4735 // FIXME: The deleting destructor is equivalent to the selected operator
4736 // delete if:
4737 // * either the delete is a destroying operator delete or the destructor
4738 // would be trivial if it weren't virtual,
4739 // * the conversion from the 'this' parameter to the first parameter of the
4740 // destructor is equivalent to a bitcast,
4741 // * the destructor does not have an implicit "this" return, and
4742 // * the operator delete has the same calling convention and IR function type
4743 // as the destructor.
4744 // In such cases we should try to emit the deleting dtor as an alias to the
4745 // selected 'operator delete'.
4747 llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4749 if (CGType == StructorCodegen::COMDAT) {
4750 SmallString<256> Buffer;
4751 llvm::raw_svector_ostream Out(Buffer);
4752 if (DD)
4753 getMangleContext().mangleCXXDtorComdat(DD, Out);
4754 else
4755 getMangleContext().mangleCXXCtorComdat(CD, Out);
4756 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4757 Fn->setComdat(C);
4758 } else {
4759 CGM.maybeSetTrivialComdat(*MD, *Fn);
4760 }
4761 }
4763 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4764 // void *__cxa_begin_catch(void*);
4765 llvm::FunctionType *FTy = llvm::FunctionType::get(
4766 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4768 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4771 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4772 // void __cxa_end_catch();
4773 llvm::FunctionType *FTy =
4774 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4776 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4779 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4780 // void *__cxa_get_exception_ptr(void*);
4781 llvm::FunctionType *FTy = llvm::FunctionType::get(
4782 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4784 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4787 namespace {
4788 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4789 /// exception type lets us state definitively that the thrown exception
4790 /// type does not have a destructor. In particular:
4791 /// - Catch-alls tell us nothing, so we have to conservatively
4792 /// assume that the thrown exception might have a destructor.
4793 /// - Catches by reference behave according to their base types.
4794 /// - Catches of non-record types will only trigger for exceptions
4795 /// of non-record types, which never have destructors.
4796 /// - Catches of record types can trigger for arbitrary subclasses
4797 /// of the caught type, so we have to assume the actual thrown
4798 /// exception type might have a throwing destructor, even if the
4799 /// caught type's destructor is trivial or nothrow.
4800 struct CallEndCatch final : EHScopeStack::Cleanup {
4801 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4802 bool MightThrow;
4804 void Emit(CodeGenFunction &CGF, Flags flags) override {
4805 if (!MightThrow) {
4806 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4807 return;
4808 }
4810 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4811 }
4812 };
4813 }
4815 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4816 /// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume
4817 /// that the exception object's dtor is nothrow, therefore the __cxa_end_catch
4818 /// call can be marked as nounwind even if EndMightThrow is true.
4820 /// \param EndMightThrow - true if __cxa_end_catch might throw
4821 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4822 llvm::Value *Exn,
4823 bool EndMightThrow) {
4824 llvm::CallInst *call =
4825 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4827 CGF.EHStack.pushCleanup<CallEndCatch>(
4828 NormalAndEHCleanup,
4829 EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor);
4831 return call;
4832 }
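// A rough source-to-IR view of the bracketing this sets up (assumed IR):
//
//   try { mayThrow(); } catch (int e) { handle(e); }
//
//   %adj = call ptr @__cxa_begin_catch(ptr %exn)   ; nounwind, from above
//   ... initialize e and emit the handler body ...
//   call void @__cxa_end_catch()                   ; pushed cleanup
//
// The cleanup runs on both the normal and EH paths out of the handler;
// whether the __cxa_end_catch call itself may unwind is exactly the
// EndMightThrow decision made by the caller (see InitCatchParam below).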
4834 /// A "special initializer" callback for initializing a catch
4835 /// parameter during catch initialization.
4836 static void InitCatchParam(CodeGenFunction &CGF,
4837 const VarDecl &CatchParam,
4838 Address ParamAddr,
4839 SourceLocation Loc) {
4840 // Load the exception from where the landing pad saved it.
4841 llvm::Value *Exn = CGF.getExceptionFromSlot();
4843 CanQualType CatchType =
4844 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4845 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4847 // If we're catching by reference, we can just cast the object
4848 // pointer to the appropriate pointer.
4849 if (isa<ReferenceType>(CatchType)) {
4850 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4851 bool EndCatchMightThrow = CaughtType->isRecordType();
4853 // __cxa_begin_catch returns the adjusted object pointer.
4854 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4856 // We have no way to tell the personality function that we're
4857 // catching by reference, so if we're catching a pointer,
4858 // __cxa_begin_catch will actually return that pointer by value.
4859 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4860 QualType PointeeType = PT->getPointeeType();
4862 // When catching by reference, generally we should just ignore
4863 // this by-value pointer and use the exception object instead.
4864 if (!PointeeType->isRecordType()) {
4866 // Exn points to the struct _Unwind_Exception header, which
4867 // we have to skip past in order to reach the exception data.
4868 unsigned HeaderSize =
4869 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4870 AdjustedExn =
4871 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4873 // However, if we're catching a pointer-to-record type that won't
4874 // work, because the personality function might have adjusted
4875 // the pointer. There's actually no way for us to fully satisfy
4876 // the language/ABI contract here: we can't use Exn because it
4877 // might have the wrong adjustment, but we can't use the by-value
4878 // pointer because it's off by a level of abstraction.
4880 // The current solution is to dump the adjusted pointer into an
4881 // alloca, which breaks language semantics (because changing the
4882 // pointer doesn't change the exception) but at least works.
4883 // The better solution would be to filter out non-exact matches
4884 // and rethrow them, but this is tricky because the rethrow
4885 // really needs to be catchable by other sites at this landing
4886 // pad. The best solution is to fix the personality function.
4887 } else {
4888 // Pull the pointer for the reference type off.
4889 llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
4891 // Create the temporary and write the adjusted pointer into it.
4892 Address ExnPtrTmp =
4893 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4894 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4895 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4897 // Bind the reference to the temporary.
4898 AdjustedExn = ExnPtrTmp.emitRawPointer(CGF);
4899 }
4900 }
4902 llvm::Value *ExnCast =
4903 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4904 CGF.Builder.CreateStore(ExnCast, ParamAddr);
4905 return;
4906 }
4908 // Scalars and complexes.
4909 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4910 if (TEK != TEK_Aggregate) {
4911 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4913 // If the catch type is a pointer type, __cxa_begin_catch returns
4914 // the pointer by value.
4915 if (CatchType->hasPointerRepresentation()) {
4916 llvm::Value *CastExn =
4917 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4919 switch (CatchType.getQualifiers().getObjCLifetime()) {
4920 case Qualifiers::OCL_Strong:
4921 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4922 [[fallthrough]];
4924 case Qualifiers::OCL_None:
4925 case Qualifiers::OCL_ExplicitNone:
4926 case Qualifiers::OCL_Autoreleasing:
4927 CGF.Builder.CreateStore(CastExn, ParamAddr);
4928 return;
4930 case Qualifiers::OCL_Weak:
4931 CGF.EmitARCInitWeak(ParamAddr, CastExn);
4932 return;
4933 }
4934 llvm_unreachable("bad ownership qualifier!");
4937 // Otherwise, it returns a pointer into the exception object.
4939 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType);
4940 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4941 switch (TEK) {
4942 case TEK_Complex:
4943 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4944 /*init*/ true);
4945 return;
4946 case TEK_Scalar: {
4947 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4948 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4949 return;
4950 }
4951 case TEK_Aggregate:
4952 llvm_unreachable("evaluation kind filtered out!");
4954 llvm_unreachable("bad evaluation kind");
4957 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4958 auto catchRD = CatchType->getAsCXXRecordDecl();
4959 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4961 llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
4963 // Check for a copy expression. If we don't have a copy expression,
4964 // that means a trivial copy is okay.
4965 const Expr *copyExpr = CatchParam.getInit();
4966 if (!copyExpr) {
4967 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4968 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4969 LLVMCatchTy, caughtExnAlignment);
4970 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4971 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4972 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4973 return;
4974 }
4976 // We have to call __cxa_get_exception_ptr to get the adjusted
4977 // pointer before copying.
4978 llvm::CallInst *rawAdjustedExn =
4979 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4981 // Cast that to the appropriate type.
4982 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4983 LLVMCatchTy, caughtExnAlignment);
4985 // The copy expression is defined in terms of an OpaqueValueExpr.
4986 // Find it and map it to the adjusted expression.
4987 CodeGenFunction::OpaqueValueMapping
4988 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4989 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4991 // Call the copy ctor in a terminate scope.
4992 CGF.EHStack.pushTerminate();
4994 // Perform the copy construction.
4995 CGF.EmitAggExpr(copyExpr,
4996 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4997 AggValueSlot::IsNotDestructed,
4998 AggValueSlot::DoesNotNeedGCBarriers,
4999 AggValueSlot::IsNotAliased,
5000 AggValueSlot::DoesNotOverlap));
5002 // Leave the terminate scope.
5003 CGF.EHStack.popTerminate();
5005 // Undo the opaque value mapping.
5006 opaque.pop();
5008 // Finally we can call __cxa_begin_catch.
5009 CallBeginCatch(CGF, Exn, true);
5010 }
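// Ordering recap for a by-value record catch with a non-trivial copy
// constructor, as emitted above (assumed IR):
//
//   %src = call ptr @__cxa_get_exception_ptr(ptr %exn) ; adjusted pointer
//   ... copy-construct the catch variable from %src ...  ; terminate scope
//   call ptr @__cxa_begin_catch(ptr %exn)
//
// The copy is performed before __cxa_begin_catch, using the separate
// __cxa_get_exception_ptr entry point to obtain the adjusted pointer, and a
// throwing copy constructor terminates rather than unwinding.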
5012 /// Begins a catch statement by initializing the catch variable and
5013 /// calling __cxa_begin_catch.
5014 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
5015 const CXXCatchStmt *S) {
5016 // We have to be very careful with the ordering of cleanups here:
5017 // C++ [except.throw]p4:
5018 // The destruction [of the exception temporary] occurs
5019 // immediately after the destruction of the object declared in
5020 // the exception-declaration in the handler.
5022 // So the precise ordering is:
5023 // 1. Construct catch variable.
5024 // 2. __cxa_begin_catch
5025 // 3. Enter __cxa_end_catch cleanup
5026 // 4. Enter dtor cleanup
5028 // We do this by using a slightly abnormal initialization process.
5029 // Delegation sequence:
5030 // - ExitCXXTryStmt opens a RunCleanupsScope
5031 // - EmitAutoVarAlloca creates the variable and debug info
5032 // - InitCatchParam initializes the variable from the exception
5033 // - CallBeginCatch calls __cxa_begin_catch
5034 // - CallBeginCatch enters the __cxa_end_catch cleanup
5035 // - EmitAutoVarCleanups enters the variable destructor cleanup
5036 // - EmitCXXTryStmt emits the code for the catch body
5037 // - EmitCXXTryStmt closes the RunCleanupsScope
5039 VarDecl *CatchParam = S->getExceptionDecl();
5040 if (!CatchParam) {
5041 llvm::Value *Exn = CGF.getExceptionFromSlot();
5042 CallBeginCatch(CGF, Exn, true);
5043 return;
5044 }
5046 // Emit the local.
5047 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
5048 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
5049 CGF.EmitAutoVarCleanups(var);
5050 }
5052 /// Get or define the following function:
5053 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
5054 /// This code is used only in C++.
5055 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
5056 ASTContext &C = CGM.getContext();
5057 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
5058 C.VoidTy, {C.getPointerType(C.CharTy)});
5059 llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
5060 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
5061 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
5062 llvm::Function *fn =
5063 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
5064 if (fn->empty()) {
5065 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
5066 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, fn);
5067 fn->setDoesNotThrow();
5068 fn->setDoesNotReturn();
5070 // What we really want is to massively penalize inlining without
5071 // forbidding it completely. The difference between that and
5072 // 'noinline' is negligible.
5073 fn->addFnAttr(llvm::Attribute::NoInline);
5075 // Allow this function to be shared across translation units, but
5076 // we don't want it to turn into an exported symbol.
5077 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
5078 fn->setVisibility(llvm::Function::HiddenVisibility);
5079 if (CGM.supportsCOMDAT())
5080 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
5082 // Set up the function.
5083 llvm::BasicBlock *entry =
5084 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
5085 CGBuilderTy builder(CGM, entry);
5087 // Pull the exception pointer out of the parameter list.
5088 llvm::Value *exn = &*fn->arg_begin();
5090 // Call __cxa_begin_catch(exn).
5091 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
5092 catchCall->setDoesNotThrow();
5093 catchCall->setCallingConv(CGM.getRuntimeCC());
5095 // Call std::terminate().
5096 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
5097 termCall->setDoesNotThrow();
5098 termCall->setDoesNotReturn();
5099 termCall->setCallingConv(CGM.getRuntimeCC());
5101 // std::terminate cannot return.
5102 builder.CreateUnreachable();
5103 }
5104 return fnRef;
5105 }
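// The helper defined above has roughly this shape (assumed IR):
//
//   define linkonce_odr hidden void @__clang_call_terminate(ptr %exn) {
//     %0 = call ptr @__cxa_begin_catch(ptr %exn)
//     call void @_ZSt9terminatev()
//     unreachable
//   }
//
// marked noreturn/nounwind/noinline and placed in a COMDAT where supported,
// so every TU can emit it without producing an exported symbol.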
5107 llvm::CallInst *
5108 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5109 llvm::Value *Exn) {
5110 // In C++, we want to call __cxa_begin_catch() before terminating.
5111 if (Exn) {
5112 assert(CGF.CGM.getLangOpts().CPlusPlus);
5113 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
5114 }
5115 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
5116 }
5118 std::pair<llvm::Value *, const CXXRecordDecl *>
5119 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
5120 const CXXRecordDecl *RD) {
5121 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
5122 }
5124 llvm::Constant *
5125 ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
5126 const CXXMethodDecl *origMD =
5127 cast<CXXMethodDecl>(CGM.getItaniumVTableContext()
5128 .findOriginalMethod(MD->getCanonicalDecl())
5129 .getDecl());
5130 llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD);
5131 QualType funcType = CGM.getContext().getMemberPointerType(
5132 MD->getType(), MD->getParent()->getTypeForDecl());
5133 return CGM.getMemberFunctionPointer(thunk, funcType);
5134 }
5136 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
5137 const CXXCatchStmt *C) {
5138 if (CGF.getTarget().hasFeature("exception-handling"))
5139 CGF.EHStack.pushCleanup<CatchRetScope>(
5140 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
5141 ItaniumCXXABI::emitBeginCatch(CGF, C);
5142 }
5144 llvm::CallInst *
5145 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
5146 llvm::Value *Exn) {
5147 // The Itanium ABI calls __clang_call_terminate(), which calls
5148 // __cxa_begin_catch() on the violating exception to mark it handled. That
5149 // is currently hard to do with the wasm EH instruction structure
5150 // (catch/catch_all), so we just call std::terminate() and ignore the
5151 // violating exception, as in CGCXXABI.
5152 // TODO: Consider a code transformation that makes calling __clang_call_terminate possible.
5153 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
5154 }
5156 /// Register a global destructor as best as we know how.
5157 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
5158 llvm::FunctionCallee Dtor,
5159 llvm::Constant *Addr) {
5160 if (D.getTLSKind() != VarDecl::TLS_None) {
5161 llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
5163 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
5164 llvm::FunctionType *AtExitTy =
5165 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);
5167 // Fetch the actual function.
5168 llvm::FunctionCallee AtExit =
5169 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
5171 // Create __dtor function for the var decl.
5172 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
5174 // Register above __dtor with atexit().
5175 // The first parameter is the flags word and must be 0; the second is the function pointer.
5176 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
5177 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
5179 // There is no way to unregister a TLS __dtor, so we are done.
5180 return;
5181 }
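// Net effect of the TLS branch above, in pseudo-C (DtorStub is the generated
// stub wrapping 'Dtor(Addr)' in the int(int, ...) shape the service expects):
//
//   __pt_atexit_np(/*flags=*/0, DtorStub);
//
// Unlike the non-TLS path below, nothing is ever unregistered: the stub
// simply runs at teardown.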
5183 // Create __dtor function for the var decl.
5184 llvm::Function *DtorStub =
5185 cast<llvm::Function>(CGF.createAtExitStub(D, Dtor, Addr));
5187 // Register above __dtor with atexit().
5188 CGF.registerGlobalDtorWithAtExit(DtorStub);
5190 // Emit __finalize function to unregister __dtor and (as appropriate) call
5191 // __dtor.
5192 emitCXXStermFinalizer(D, DtorStub, Addr);
5193 }
5195 void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
5196 llvm::Constant *addr) {
5197 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
5198 SmallString<256> FnName;
5199 {
5200 llvm::raw_svector_ostream Out(FnName);
5201 getMangleContext().mangleDynamicStermFinalizer(&D, Out);
5202 }
5204 // Create the finalization action associated with a variable.
5205 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
5206 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
5207 FTy, FnName.str(), FI, D.getLocation());
5209 CodeGenFunction CGF(CGM);
5211 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
5212 FunctionArgList(), D.getLocation(),
5213 D.getInit()->getExprLoc());
5215 // The unatexit subroutine unregisters __dtor functions that were previously
5216 // registered by the atexit subroutine. If the referenced function is found,
5217 // unatexit returns 0, meaning that the cleanup is still
5218 // pending (and we should call the __dtor function).
5219 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
5221 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
5223 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
5224 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
5226 // Check if unatexit returns a value of 0. If it does, jump to
5227 // DestructCallBlock, otherwise jump to EndBlock directly.
5228 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
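// The branch being built here implements, in effect:
//
//   if (unatexit(dtorStub) == 0) // found and removed => cleanup still pending
//     dtorStub();                // run the destructor now
//
// so the finalizer never re-runs a __dtor that atexit processing already ran.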
5230 CGF.EmitBlock(DestructCallBlock);
5232 // Emit the call to dtorStub.
5233 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
5235 // Make sure the call and the callee agree on calling convention.
5236 CI->setCallingConv(dtorStub->getCallingConv());
5238 CGF.EmitBlock(EndBlock);
5240 CGF.FinishFunction();
5242 if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
5243 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
5244 IPA->getPriority());
5245 } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
5246 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
5247 // According to C++ [basic.start.init]p2, class template static data
5248 // members (i.e., implicitly or explicitly instantiated specializations)
5249 // have unordered initialization. As a consequence, we can put them into
5250 // their own llvm.global_dtors entry.
5251 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
5252 } else {
5253 CGM.AddCXXStermFinalizerEntry(StermFinalizer);