1 //===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-module state used while generating code.
11 //===----------------------------------------------------------------------===//
13 #include "CodeGenModule.h"
14 #include "ABIInfo.h"
15 #include "CGBlocks.h"
16 #include "CGCUDARuntime.h"
17 #include "CGCXXABI.h"
18 #include "CGCall.h"
19 #include "CGDebugInfo.h"
20 #include "CGHLSLRuntime.h"
21 #include "CGObjCRuntime.h"
22 #include "CGOpenCLRuntime.h"
23 #include "CGOpenMPRuntime.h"
24 #include "CGOpenMPRuntimeGPU.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenPGO.h"
27 #include "ConstantEmitter.h"
28 #include "CoverageMappingGen.h"
29 #include "TargetInfo.h"
30 #include "clang/AST/ASTContext.h"
31 #include "clang/AST/CharUnits.h"
32 #include "clang/AST/DeclCXX.h"
33 #include "clang/AST/DeclObjC.h"
34 #include "clang/AST/DeclTemplate.h"
35 #include "clang/AST/Mangle.h"
36 #include "clang/AST/RecursiveASTVisitor.h"
37 #include "clang/AST/StmtVisitor.h"
38 #include "clang/Basic/Builtins.h"
39 #include "clang/Basic/CharInfo.h"
40 #include "clang/Basic/CodeGenOptions.h"
41 #include "clang/Basic/Diagnostic.h"
42 #include "clang/Basic/FileManager.h"
43 #include "clang/Basic/Module.h"
44 #include "clang/Basic/SourceManager.h"
45 #include "clang/Basic/TargetInfo.h"
46 #include "clang/Basic/Version.h"
47 #include "clang/CodeGen/BackendUtil.h"
48 #include "clang/CodeGen/ConstantInitBuilder.h"
49 #include "clang/Frontend/FrontendDiagnostic.h"
50 #include "llvm/ADT/STLExtras.h"
51 #include "llvm/ADT/StringExtras.h"
52 #include "llvm/ADT/StringSwitch.h"
53 #include "llvm/Analysis/TargetLibraryInfo.h"
54 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
55 #include "llvm/IR/CallingConv.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/Intrinsics.h"
58 #include "llvm/IR/LLVMContext.h"
59 #include "llvm/IR/Module.h"
60 #include "llvm/IR/ProfileSummary.h"
61 #include "llvm/ProfileData/InstrProfReader.h"
62 #include "llvm/ProfileData/SampleProf.h"
63 #include "llvm/Support/CRC.h"
64 #include "llvm/Support/CodeGen.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/ConvertUTF.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/TimeProfiler.h"
69 #include "llvm/Support/xxhash.h"
70 #include "llvm/TargetParser/Triple.h"
71 #include "llvm/TargetParser/X86TargetParser.h"
72 #include <optional>
74 using namespace clang;
75 using namespace CodeGen;
77 static llvm::cl::opt<bool> LimitedCoverage(
78 "limited-coverage-experimental", llvm::cl::Hidden,
79 llvm::cl::desc("Emit limited coverage mapping information (experimental)"));
81 static const char AnnotationSection[] = "llvm.metadata";
83 static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
84 switch (CGM.getContext().getCXXABIKind()) {
85 case TargetCXXABI::AppleARM64:
86 case TargetCXXABI::Fuchsia:
87 case TargetCXXABI::GenericAArch64:
88 case TargetCXXABI::GenericARM:
89 case TargetCXXABI::iOS:
90 case TargetCXXABI::WatchOS:
91 case TargetCXXABI::GenericMIPS:
92 case TargetCXXABI::GenericItanium:
93 case TargetCXXABI::WebAssembly:
94 case TargetCXXABI::XL:
95 return CreateItaniumCXXABI(CGM);
96 case TargetCXXABI::Microsoft:
97 return CreateMicrosoftCXXABI(CGM);
100 llvm_unreachable("invalid C++ ABI kind");
103 CodeGenModule::CodeGenModule(ASTContext &C,
104 IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
105 const HeaderSearchOptions &HSO,
106 const PreprocessorOptions &PPO,
107 const CodeGenOptions &CGO, llvm::Module &M,
108 DiagnosticsEngine &diags,
109 CoverageSourceInfo *CoverageInfo)
110 : Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO),
111 PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
112 Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
113 VMContext(M.getContext()), Types(*this), VTables(*this),
114 SanitizerMD(new SanitizerMetadata(*this)) {
116 // Initialize the type cache.
117 llvm::LLVMContext &LLVMContext = M.getContext();
118 VoidTy = llvm::Type::getVoidTy(LLVMContext);
119 Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
120 Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
121 Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
122 Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
123 HalfTy = llvm::Type::getHalfTy(LLVMContext);
124 BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
125 FloatTy = llvm::Type::getFloatTy(LLVMContext);
126 DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
127 PointerWidthInBits = C.getTargetInfo().getPointerWidth(LangAS::Default);
128 PointerAlignInBytes =
129 C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(LangAS::Default))
130 .getQuantity();
131 SizeSizeInBytes =
132 C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
133 IntAlignInBytes =
134 C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
135 CharTy =
136 llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getCharWidth());
137 IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
138 IntPtrTy = llvm::IntegerType::get(LLVMContext,
139 C.getTargetInfo().getMaxPointerWidth());
140 Int8PtrTy = Int8Ty->getPointerTo(0);
141 Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
142 const llvm::DataLayout &DL = M.getDataLayout();
143 AllocaInt8PtrTy = Int8Ty->getPointerTo(DL.getAllocaAddrSpace());
144 GlobalsInt8PtrTy = Int8Ty->getPointerTo(DL.getDefaultGlobalsAddressSpace());
145 ConstGlobalsPtrTy = Int8Ty->getPointerTo(
146 C.getTargetAddressSpace(GetGlobalConstantAddressSpace()));
147 ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
149 // Build C++20 Module initializers.
150 // TODO: Add Microsoft here once we know the mangling required for the
151 // initializers.
152 CXX20ModuleInits =
153 LangOpts.CPlusPlusModules && getCXXABI().getMangleContext().getKind() ==
154 ItaniumMangleContext::MK_Itanium;
156 RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
158 if (LangOpts.ObjC)
159 createObjCRuntime();
160 if (LangOpts.OpenCL)
161 createOpenCLRuntime();
162 if (LangOpts.OpenMP)
163 createOpenMPRuntime();
164 if (LangOpts.CUDA)
165 createCUDARuntime();
166 if (LangOpts.HLSL)
167 createHLSLRuntime();
169 // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
170 if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
171 (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
172 TBAA.reset(new CodeGenTBAA(Context, TheModule, CodeGenOpts, getLangOpts(),
173 getCXXABI().getMangleContext()));
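// Illustrative note: with TBAA enabled, memory accesses emitted later are
// tagged via DecorateInstructionWithTBAA() below (e.g. an i32 load of an 'int'
// lvalue gets a !tbaa access tag), letting the optimizer assume that accesses
// to distinct C/C++ types do not alias.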
175 // If debug info or coverage generation is enabled, create the CGDebugInfo
176 // object.
177 if (CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo ||
178 CodeGenOpts.CoverageNotesFile.size() ||
179 CodeGenOpts.CoverageDataFile.size())
180 DebugInfo.reset(new CGDebugInfo(*this));
182 Block.GlobalUniqueCount = 0;
184 if (C.getLangOpts().ObjC)
185 ObjCData.reset(new ObjCEntrypoints());
187 if (CodeGenOpts.hasProfileClangUse()) {
188 auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
189 CodeGenOpts.ProfileInstrumentUsePath, *FS,
190 CodeGenOpts.ProfileRemappingFile);
191 // We're checking for profile read errors in CompilerInvocation, so if
192 // there was an error it should've already been caught. If it hasn't been
193 // somehow, trip an assertion.
194 assert(ReaderOrErr);
195 PGOReader = std::move(ReaderOrErr.get());
198 // If coverage mapping generation is enabled, create the
199 // CoverageMappingModuleGen object.
200 if (CodeGenOpts.CoverageMapping)
201 CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
203 // Generate the module name hash here if needed.
204 if (CodeGenOpts.UniqueInternalLinkageNames &&
205 !getModule().getSourceFileName().empty()) {
206 std::string Path = getModule().getSourceFileName();
207 // Check if a path substitution is needed from the MacroPrefixMap.
208 for (const auto &Entry : LangOpts.MacroPrefixMap)
209 if (Path.rfind(Entry.first, 0) != std::string::npos) {
210 Path = Entry.second + Path.substr(Entry.first.size());
211 break;
213 ModuleNameHash = llvm::getUniqueInternalLinkagePostfix(Path);
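// Illustrative example (assuming -funique-internal-linkage-names): the postfix
// is derived from the possibly prefix-remapped source path above, and
// getMangledNameImpl() later appends it to internal-linkage symbols so the
// same 'static' function in different TUs gets distinct names.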
217 CodeGenModule::~CodeGenModule() {}
219 void CodeGenModule::createObjCRuntime() {
220 // This is just isGNUFamily(), but we want to force implementors of
221 // new ABIs to decide how best to do this.
222 switch (LangOpts.ObjCRuntime.getKind()) {
223 case ObjCRuntime::GNUstep:
224 case ObjCRuntime::GCC:
225 case ObjCRuntime::ObjFW:
226 ObjCRuntime.reset(CreateGNUObjCRuntime(*this));
227 return;
229 case ObjCRuntime::FragileMacOSX:
230 case ObjCRuntime::MacOSX:
231 case ObjCRuntime::iOS:
232 case ObjCRuntime::WatchOS:
233 ObjCRuntime.reset(CreateMacObjCRuntime(*this));
234 return;
236 llvm_unreachable("bad runtime kind");
239 void CodeGenModule::createOpenCLRuntime() {
240 OpenCLRuntime.reset(new CGOpenCLRuntime(*this));
243 void CodeGenModule::createOpenMPRuntime() {
244 // Select a specialized code generation class based on the target, if any.
245 // If it does not exist use the default implementation.
246 switch (getTriple().getArch()) {
247 case llvm::Triple::nvptx:
248 case llvm::Triple::nvptx64:
249 case llvm::Triple::amdgcn:
250 assert(getLangOpts().OpenMPIsDevice &&
251 "OpenMP AMDGPU/NVPTX is only prepared to deal with device code.");
252 OpenMPRuntime.reset(new CGOpenMPRuntimeGPU(*this));
253 break;
254 default:
255 if (LangOpts.OpenMPSimd)
256 OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
257 else
258 OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
259 break;
263 void CodeGenModule::createCUDARuntime() {
264 CUDARuntime.reset(CreateNVCUDARuntime(*this));
267 void CodeGenModule::createHLSLRuntime() {
268 HLSLRuntime.reset(new CGHLSLRuntime(*this));
271 void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
272 Replacements[Name] = C;
275 void CodeGenModule::applyReplacements() {
276 for (auto &I : Replacements) {
277 StringRef MangledName = I.first();
278 llvm::Constant *Replacement = I.second;
279 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
280 if (!Entry)
281 continue;
282 auto *OldF = cast<llvm::Function>(Entry);
283 auto *NewF = dyn_cast<llvm::Function>(Replacement);
284 if (!NewF) {
285 if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
286 NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
287 } else {
288 auto *CE = cast<llvm::ConstantExpr>(Replacement);
289 assert(CE->getOpcode() == llvm::Instruction::BitCast ||
290 CE->getOpcode() == llvm::Instruction::GetElementPtr);
291 NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
295 // Replace old with new, but keep the old order.
296 OldF->replaceAllUsesWith(Replacement);
297 if (NewF) {
298 NewF->removeFromParent();
299 OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
300 NewF);
302 OldF->eraseFromParent();
306 void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
307 GlobalValReplacements.push_back(std::make_pair(GV, C));
310 void CodeGenModule::applyGlobalValReplacements() {
311 for (auto &I : GlobalValReplacements) {
312 llvm::GlobalValue *GV = I.first;
313 llvm::Constant *C = I.second;
315 GV->replaceAllUsesWith(C);
316 GV->eraseFromParent();
320 // This is only used on aliases that we created, and we know they have a
321 // linear structure.
322 static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) {
323 const llvm::Constant *C;
324 if (auto *GA = dyn_cast<llvm::GlobalAlias>(GV))
325 C = GA->getAliasee();
326 else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(GV))
327 C = GI->getResolver();
328 else
329 return GV;
331 const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(C->stripPointerCasts());
332 if (!AliaseeGV)
333 return nullptr;
335 const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject();
336 if (FinalGV == GV)
337 return nullptr;
339 return FinalGV;
342 static bool checkAliasedGlobal(
343 DiagnosticsEngine &Diags, SourceLocation Location, bool IsIFunc,
344 const llvm::GlobalValue *Alias, const llvm::GlobalValue *&GV,
345 const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames,
346 SourceRange AliasRange) {
347 GV = getAliasedGlobal(Alias);
348 if (!GV) {
349 Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
350 return false;
353 if (GV->isDeclaration()) {
354 Diags.Report(Location, diag::err_alias_to_undefined) << IsIFunc << IsIFunc;
355 Diags.Report(Location, diag::note_alias_requires_mangled_name)
356 << IsIFunc << IsIFunc;
357 // Provide a note if the given function is not found as written but does
358 // exist under a mangled name.
359 for (const auto &[Decl, Name] : MangledDeclNames) {
360 if (const auto *ND = dyn_cast<NamedDecl>(Decl.getDecl())) {
361 if (ND->getName() == GV->getName()) {
362 Diags.Report(Location, diag::note_alias_mangled_name_alternative)
363 << Name
364 << FixItHint::CreateReplacement(
365 AliasRange,
366 (Twine(IsIFunc ? "ifunc" : "alias") + "(\"" + Name + "\")")
367 .str());
371 return false;
374 if (IsIFunc) {
375 // Check resolver function type.
376 const auto *F = dyn_cast<llvm::Function>(GV);
377 if (!F) {
378 Diags.Report(Location, diag::err_alias_to_undefined)
379 << IsIFunc << IsIFunc;
380 return false;
383 llvm::FunctionType *FTy = F->getFunctionType();
384 if (!FTy->getReturnType()->isPointerTy()) {
385 Diags.Report(Location, diag::err_ifunc_resolver_return);
386 return false;
390 return true;
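// Illustrative example of the ifunc resolver rule checked above (hypothetical
// names); the resolver must return a pointer:
//   extern void impl(void);
//   static void *resolver(void) { return (void *)&impl; }
//   void entry(void) __attribute__((ifunc("resolver")));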
393 void CodeGenModule::checkAliases() {
394 // Check if the constructed aliases are well formed. It is really unfortunate
395 // that we have to do this in CodeGen, but we only construct mangled names
396 // and aliases during codegen.
397 bool Error = false;
398 DiagnosticsEngine &Diags = getDiags();
399 for (const GlobalDecl &GD : Aliases) {
400 const auto *D = cast<ValueDecl>(GD.getDecl());
401 SourceLocation Location;
402 SourceRange Range;
403 bool IsIFunc = D->hasAttr<IFuncAttr>();
404 if (const Attr *A = D->getDefiningAttr()) {
405 Location = A->getLocation();
406 Range = A->getRange();
407 } else
408 llvm_unreachable("Not an alias or ifunc?");
410 StringRef MangledName = getMangledName(GD);
411 llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
412 const llvm::GlobalValue *GV = nullptr;
413 if (!checkAliasedGlobal(Diags, Location, IsIFunc, Alias, GV,
414 MangledDeclNames, Range)) {
415 Error = true;
416 continue;
419 llvm::Constant *Aliasee =
420 IsIFunc ? cast<llvm::GlobalIFunc>(Alias)->getResolver()
421 : cast<llvm::GlobalAlias>(Alias)->getAliasee();
423 llvm::GlobalValue *AliaseeGV;
424 if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
425 AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
426 else
427 AliaseeGV = cast<llvm::GlobalValue>(Aliasee);
429 if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
430 StringRef AliasSection = SA->getName();
431 if (AliasSection != AliaseeGV->getSection())
432 Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
433 << AliasSection << IsIFunc << IsIFunc;
436 // We have to handle aliases to weak aliases here. LLVM itself disallows
437 // this since the object semantics would not match the IL one. For
438 // compatibility with gcc we implement it by just pointing the alias
439 // to its aliasee's aliasee. We also warn, since the user is probably
440 // expecting the link to be weak.
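// Illustrative example (hypothetical names):
//   void impl(void) {}
//   void mid(void) __attribute__((weak, alias("impl")));
//   void user(void) __attribute__((alias("mid")));
// 'user' is diagnosed and redirected to point directly at 'impl'.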
441 if (auto *GA = dyn_cast<llvm::GlobalAlias>(AliaseeGV)) {
442 if (GA->isInterposable()) {
443 Diags.Report(Location, diag::warn_alias_to_weak_alias)
444 << GV->getName() << GA->getName() << IsIFunc;
445 Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
446 GA->getAliasee(), Alias->getType());
448 if (IsIFunc)
449 cast<llvm::GlobalIFunc>(Alias)->setResolver(Aliasee);
450 else
451 cast<llvm::GlobalAlias>(Alias)->setAliasee(Aliasee);
455 if (!Error)
456 return;
458 for (const GlobalDecl &GD : Aliases) {
459 StringRef MangledName = getMangledName(GD);
460 llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
461 Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
462 Alias->eraseFromParent();
466 void CodeGenModule::clear() {
467 DeferredDeclsToEmit.clear();
468 EmittedDeferredDecls.clear();
469 if (OpenMPRuntime)
470 OpenMPRuntime->clear();
473 void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
474 StringRef MainFile) {
475 if (!hasDiagnostics())
476 return;
477 if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
478 if (MainFile.empty())
479 MainFile = "<stdin>";
480 Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
481 } else {
482 if (Mismatched > 0)
483 Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched;
485 if (Missing > 0)
486 Diags.Report(diag::warn_profile_data_missing) << Visited << Missing;
490 static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
491 llvm::Module &M) {
492 if (!LO.VisibilityFromDLLStorageClass)
493 return;
495 llvm::GlobalValue::VisibilityTypes DLLExportVisibility =
496 CodeGenModule::GetLLVMVisibility(LO.getDLLExportVisibility());
497 llvm::GlobalValue::VisibilityTypes NoDLLStorageClassVisibility =
498 CodeGenModule::GetLLVMVisibility(LO.getNoDLLStorageClassVisibility());
499 llvm::GlobalValue::VisibilityTypes ExternDeclDLLImportVisibility =
500 CodeGenModule::GetLLVMVisibility(LO.getExternDeclDLLImportVisibility());
501 llvm::GlobalValue::VisibilityTypes ExternDeclNoDLLStorageClassVisibility =
502 CodeGenModule::GetLLVMVisibility(
503 LO.getExternDeclNoDLLStorageClassVisibility());
505 for (llvm::GlobalValue &GV : M.global_values()) {
506 if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
507 continue;
509 // Reset DSO locality before setting the visibility. This removes
510 // any effects that visibility options and annotations may have
511 // had on the DSO locality. Setting the visibility will implicitly set
512 // appropriate globals to DSO Local; however, this will be pessimistic
513 // w.r.t. the normal compiler IRGen.
514 GV.setDSOLocal(false);
516 if (GV.isDeclarationForLinker()) {
517 GV.setVisibility(GV.getDLLStorageClass() ==
518 llvm::GlobalValue::DLLImportStorageClass
519 ? ExternDeclDLLImportVisibility
520 : ExternDeclNoDLLStorageClassVisibility);
521 } else {
522 GV.setVisibility(GV.getDLLStorageClass() ==
523 llvm::GlobalValue::DLLExportStorageClass
524 ? DLLExportVisibility
525 : NoDLLStorageClassVisibility);
528 GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
532 void CodeGenModule::Release() {
533 Module *Primary = getContext().getCurrentNamedModule();
534 if (CXX20ModuleInits && Primary && !Primary->isHeaderLikeModule())
535 EmitModuleInitializers(Primary);
536 EmitDeferred();
537 DeferredDecls.insert(EmittedDeferredDecls.begin(),
538 EmittedDeferredDecls.end());
539 EmittedDeferredDecls.clear();
540 EmitVTablesOpportunistically();
541 applyGlobalValReplacements();
542 applyReplacements();
543 emitMultiVersionFunctions();
545 if (Context.getLangOpts().IncrementalExtensions &&
546 GlobalTopLevelStmtBlockInFlight.first) {
547 const TopLevelStmtDecl *TLSD = GlobalTopLevelStmtBlockInFlight.second;
548 GlobalTopLevelStmtBlockInFlight.first->FinishFunction(TLSD->getEndLoc());
549 GlobalTopLevelStmtBlockInFlight = {nullptr, nullptr};
552 // Module implementations are initialized the same way as a regular TU that
553 // imports one or more modules.
554 if (CXX20ModuleInits && Primary && Primary->isInterfaceOrPartition())
555 EmitCXXModuleInitFunc(Primary);
556 else
557 EmitCXXGlobalInitFunc();
558 EmitCXXGlobalCleanUpFunc();
559 registerGlobalDtorsWithAtExit();
560 EmitCXXThreadLocalInitFunc();
561 if (ObjCRuntime)
562 if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
563 AddGlobalCtor(ObjCInitFunction);
564 if (Context.getLangOpts().CUDA && CUDARuntime) {
565 if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule())
566 AddGlobalCtor(CudaCtorFunction);
568 if (OpenMPRuntime) {
569 if (llvm::Function *OpenMPRequiresDirectiveRegFun =
570 OpenMPRuntime->emitRequiresDirectiveRegFun()) {
571 AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
573 OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
574 OpenMPRuntime->clear();
576 if (PGOReader) {
577 getModule().setProfileSummary(
578 PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
579 llvm::ProfileSummary::PSK_Instr);
580 if (PGOStats.hasDiagnostics())
581 PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
583 llvm::stable_sort(GlobalCtors, [](const Structor &L, const Structor &R) {
584 return L.LexOrder < R.LexOrder;
586 EmitCtorList(GlobalCtors, "llvm.global_ctors");
587 EmitCtorList(GlobalDtors, "llvm.global_dtors");
588 EmitGlobalAnnotations();
589 EmitStaticExternCAliases();
590 checkAliases();
591 EmitDeferredUnusedCoverageMappings();
592 CodeGenPGO(*this).setValueProfilingFlag(getModule());
593 if (CoverageMapping)
594 CoverageMapping->emit();
595 if (CodeGenOpts.SanitizeCfiCrossDso) {
596 CodeGenFunction(*this).EmitCfiCheckFail();
597 CodeGenFunction(*this).EmitCfiCheckStub();
599 if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
600 finalizeKCFITypes();
601 emitAtAvailableLinkGuard();
602 if (Context.getTargetInfo().getTriple().isWasm())
603 EmitMainVoidAlias();
605 if (getTriple().isAMDGPU()) {
606 // Emit amdgpu_code_object_version module flag, which is code object version
607 // times 100.
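// For example, code object v4 is recorded as 400 and v5 as 500 (illustrative;
// the value comes straight from the -mcode-object-version setting).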
608 if (getTarget().getTargetOpts().CodeObjectVersion !=
609 TargetOptions::COV_None) {
610 getModule().addModuleFlag(llvm::Module::Error,
611 "amdgpu_code_object_version",
612 getTarget().getTargetOpts().CodeObjectVersion);
615 // Currently, the "-mprintf-kind" option is only supported for HIP
616 if (LangOpts.HIP) {
617 auto *MDStr = llvm::MDString::get(
618 getLLVMContext(), (getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
619 TargetOptions::AMDGPUPrintfKind::Hostcall)
620 ? "hostcall"
621 : "buffered");
622 getModule().addModuleFlag(llvm::Module::Error, "amdgpu_printf_kind",
623 MDStr);
627 // Emit a global array containing all external kernels or device variables
628 // used by host functions and mark it as used for CUDA/HIP. This is necessary
629 // to get kernels or device variables in archives linked in even if these
630 // kernels or device variables are only used in host functions.
631 if (!Context.CUDAExternalDeviceDeclODRUsedByHost.empty()) {
632 SmallVector<llvm::Constant *, 8> UsedArray;
633 for (auto D : Context.CUDAExternalDeviceDeclODRUsedByHost) {
634 GlobalDecl GD;
635 if (auto *FD = dyn_cast<FunctionDecl>(D))
636 GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
637 else
638 GD = GlobalDecl(D);
639 UsedArray.push_back(llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
640 GetAddrOfGlobal(GD), Int8PtrTy));
643 llvm::ArrayType *ATy = llvm::ArrayType::get(Int8PtrTy, UsedArray.size());
645 auto *GV = new llvm::GlobalVariable(
646 getModule(), ATy, false, llvm::GlobalValue::InternalLinkage,
647 llvm::ConstantArray::get(ATy, UsedArray), "__clang_gpu_used_external");
648 addCompilerUsedGlobal(GV);
651 emitLLVMUsed();
652 if (SanStats)
653 SanStats->finish();
655 if (CodeGenOpts.Autolink &&
656 (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
657 EmitModuleLinkOptions();
660 // On ELF we pass the dependent library specifiers directly to the linker
661 // without manipulating them. This is in contrast to other platforms where
662 // they are mapped to a specific linker option by the compiler. This
663 // difference is a result of the greater variety of ELF linkers and the fact
664 // that ELF linkers tend to handle libraries in a more complicated fashion
665 // than on other platforms. This forces us to defer handling the dependent
666 // libs to the linker.
668 // CUDA/HIP device and host libraries are different. Currently there is no
669 // way to differentiate dependent libraries for host or device. Existing
670 // usage of #pragma comment(lib, *) is intended for host libraries on
671 // Windows. Therefore emit llvm.dependent-libraries only for host.
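// Illustrative example: '#pragma comment(lib, "foo")' in a host TU is recorded
// as an operand of the !llvm.dependent-libraries named metadata (roughly
// !{!"foo"}), which is typically lowered to the .deplibs section for the
// linker to consume.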
672 if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
673 auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
674 for (auto *MD : ELFDependentLibraries)
675 NMD->addOperand(MD);
678 // Record mregparm value now so it is visible through rest of codegen.
679 if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
680 getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
681 CodeGenOpts.NumRegisterParameters);
683 if (CodeGenOpts.DwarfVersion) {
684 getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
685 CodeGenOpts.DwarfVersion);
688 if (CodeGenOpts.Dwarf64)
689 getModule().addModuleFlag(llvm::Module::Max, "DWARF64", 1);
691 if (Context.getLangOpts().SemanticInterposition)
692 // Require various optimization to respect semantic interposition.
693 getModule().setSemanticInterposition(true);
695 if (CodeGenOpts.EmitCodeView) {
696 // Indicate that we want CodeView in the metadata.
697 getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
699 if (CodeGenOpts.CodeViewGHash) {
700 getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
702 if (CodeGenOpts.ControlFlowGuard) {
703 // Function ID tables and checks for Control Flow Guard (cfguard=2).
704 getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 2);
705 } else if (CodeGenOpts.ControlFlowGuardNoChecks) {
706 // Function ID tables for Control Flow Guard (cfguard=1).
707 getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
709 if (CodeGenOpts.EHContGuard) {
710 // Function ID tables for EH Continuation Guard.
711 getModule().addModuleFlag(llvm::Module::Warning, "ehcontguard", 1);
713 if (Context.getLangOpts().Kernel) {
714 // Note if we are compiling with /kernel.
715 getModule().addModuleFlag(llvm::Module::Warning, "ms-kernel", 1);
717 if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
718 // We don't support LTO of modules built with different StrictVTablePointers settings.
719 // FIXME: we could support it by stripping all the information introduced
720 // by StrictVTablePointers.
722 getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers",1);
724 llvm::Metadata *Ops[2] = {
725 llvm::MDString::get(VMContext, "StrictVTablePointers"),
726 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
727 llvm::Type::getInt32Ty(VMContext), 1))};
729 getModule().addModuleFlag(llvm::Module::Require,
730 "StrictVTablePointersRequirement",
731 llvm::MDNode::get(VMContext, Ops));
733 if (getModuleDebugInfo())
734 // We support a single version in the linked module. The LLVM
735 // parser will drop debug info with a different version number
736 // (and warn about it, too).
737 getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
738 llvm::DEBUG_METADATA_VERSION);
740 // We need to record the widths of enums and wchar_t, so that we can generate
741 // the correct build attributes in the ARM backend. wchar_size is also used by
742 // TargetLibraryInfo.
743 uint64_t WCharWidth =
744 Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
745 getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);
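// (For reference, wchar_size is typically 4 on Linux targets and 2 on Windows.)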
747 llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
748 if ( Arch == llvm::Triple::arm
749 || Arch == llvm::Triple::armeb
750 || Arch == llvm::Triple::thumb
751 || Arch == llvm::Triple::thumbeb) {
752 // The minimum width of an enum in bytes
753 uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
754 getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
757 if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
758 StringRef ABIStr = Target.getABI();
759 llvm::LLVMContext &Ctx = TheModule.getContext();
760 getModule().addModuleFlag(llvm::Module::Error, "target-abi",
761 llvm::MDString::get(Ctx, ABIStr));
764 if (CodeGenOpts.SanitizeCfiCrossDso) {
765 // Indicate that we want cross-DSO control flow integrity checks.
766 getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
769 if (CodeGenOpts.WholeProgramVTables) {
770 // Indicate whether VFE was enabled for this module, so that the
771 // vcall_visibility metadata added under whole program vtables is handled
772 // appropriately in the optimizer.
773 getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
774 CodeGenOpts.VirtualFunctionElimination);
777 if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
778 getModule().addModuleFlag(llvm::Module::Override,
779 "CFI Canonical Jump Tables",
780 CodeGenOpts.SanitizeCfiCanonicalJumpTables);
783 if (LangOpts.Sanitize.has(SanitizerKind::KCFI)) {
784 getModule().addModuleFlag(llvm::Module::Override, "kcfi", 1);
785 // KCFI assumes patchable-function-prefix is the same for all indirectly
786 // called functions. Store the expected offset for code generation.
787 if (CodeGenOpts.PatchableFunctionEntryOffset)
788 getModule().addModuleFlag(llvm::Module::Override, "kcfi-offset",
789 CodeGenOpts.PatchableFunctionEntryOffset);
792 if (CodeGenOpts.CFProtectionReturn &&
793 Target.checkCFProtectionReturnSupported(getDiags())) {
794 // Indicate that we want to instrument return control flow protection.
795 getModule().addModuleFlag(llvm::Module::Min, "cf-protection-return",
799 if (CodeGenOpts.CFProtectionBranch &&
800 Target.checkCFProtectionBranchSupported(getDiags())) {
801 // Indicate that we want to instrument branch control flow protection.
802 getModule().addModuleFlag(llvm::Module::Min, "cf-protection-branch",
806 if (CodeGenOpts.FunctionReturnThunks)
807 getModule().addModuleFlag(llvm::Module::Override, "function_return_thunk_extern", 1);
809 if (CodeGenOpts.IndirectBranchCSPrefix)
810 getModule().addModuleFlag(llvm::Module::Override, "indirect_branch_cs_prefix", 1);
812 // Add module metadata for return address signing (ignoring
813 // non-leaf/all) and stack tagging. These are actually turned on by function
814 // attributes, but we use module metadata to emit build attributes. This is
815 // needed for LTO, where the function attributes are inside bitcode
816 // serialised into a global variable by the time build attributes are
817 // emitted, so we can't access them. LTO objects could be compiled with
818 // different flags, so module flags are set to "Min" behavior to achieve the
819 // same end result as a normal build, where e.g. BTI is off if any object
820 // doesn't support it.
821 if (Context.getTargetInfo().hasFeature("ptrauth") &&
822 LangOpts.getSignReturnAddressScope() !=
823 LangOptions::SignReturnAddressScopeKind::None)
824 getModule().addModuleFlag(llvm::Module::Override,
825 "sign-return-address-buildattr", 1);
826 if (LangOpts.Sanitize.has(SanitizerKind::MemtagStack))
827 getModule().addModuleFlag(llvm::Module::Override,
828 "tag-stack-memory-buildattr", 1);
830 if (Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb ||
831 Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
832 Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
833 Arch == llvm::Triple::aarch64_be) {
834 if (LangOpts.BranchTargetEnforcement)
835 getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
837 if (LangOpts.hasSignReturnAddress())
838 getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1);
839 if (LangOpts.isSignReturnAddressScopeAll())
840 getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
842 if (!LangOpts.isSignReturnAddressWithAKey())
843 getModule().addModuleFlag(llvm::Module::Min,
844 "sign-return-address-with-bkey", 1);
847 if (!CodeGenOpts.MemoryProfileOutput.empty()) {
848 llvm::LLVMContext &Ctx = TheModule.getContext();
849 getModule().addModuleFlag(
850 llvm::Module::Error, "MemProfProfileFilename",
851 llvm::MDString::get(Ctx, CodeGenOpts.MemoryProfileOutput));
854 if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
855 // Indicate whether __nvvm_reflect should be configured to flush denormal
856 // floating point values to 0. (This corresponds to its "__CUDA_FTZ"
857 // property.)
858 getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
859 CodeGenOpts.FP32DenormalMode.Output !=
860 llvm::DenormalMode::IEEE);
863 if (LangOpts.EHAsynch)
864 getModule().addModuleFlag(llvm::Module::Warning, "eh-asynch", 1);
866 // Indicate whether this Module was compiled with -fopenmp
867 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
868 getModule().addModuleFlag(llvm::Module::Max, "openmp", LangOpts.OpenMP);
869 if (getLangOpts().OpenMPIsDevice)
870 getModule().addModuleFlag(llvm::Module::Max, "openmp-device",
871 LangOpts.OpenMP);
873 // Emit OpenCL specific module metadata: OpenCL/SPIR version.
874 if (LangOpts.OpenCL || (LangOpts.CUDAIsDevice && getTriple().isSPIRV())) {
875 EmitOpenCLMetadata();
876 // Emit SPIR version.
877 if (getTriple().isSPIR()) {
878 // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
879 // opencl.spir.version named metadata.
880 // C++ for OpenCL has a distinct mapping for version compatibility with
881 // OpenCL.
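// Illustrative result: for OpenCL 1.x the pair below is {1, 2}; for OpenCL 2.0
// and later it is {<major>, 0}.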
882 auto Version = LangOpts.getOpenCLCompatibleVersion();
883 llvm::Metadata *SPIRVerElts[] = {
884 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
885 Int32Ty, Version / 100)),
886 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
887 Int32Ty, (Version / 100 > 1) ? 0 : 2))};
888 llvm::NamedMDNode *SPIRVerMD =
889 TheModule.getOrInsertNamedMetadata("opencl.spir.version");
890 llvm::LLVMContext &Ctx = TheModule.getContext();
891 SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
895 // HLSL related end of code gen work items.
896 if (LangOpts.HLSL)
897 getHLSLRuntime().finishCodeGen();
899 if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
900 assert(PLevel < 3 && "Invalid PIC Level");
901 getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
902 if (Context.getLangOpts().PIE)
903 getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
906 if (getCodeGenOpts().CodeModel.size() > 0) {
907 unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
908 .Case("tiny", llvm::CodeModel::Tiny)
909 .Case("small", llvm::CodeModel::Small)
910 .Case("kernel", llvm::CodeModel::Kernel)
911 .Case("medium", llvm::CodeModel::Medium)
912 .Case("large", llvm::CodeModel::Large)
913 .Default(~0u);
914 if (CM != ~0u) {
915 llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
916 getModule().setCodeModel(codeModel);
920 if (CodeGenOpts.NoPLT)
921 getModule().setRtLibUseGOT();
922 if (getTriple().isOSBinFormatELF() &&
923 CodeGenOpts.DirectAccessExternalData !=
924 getModule().getDirectAccessExternalData()) {
925 getModule().setDirectAccessExternalData(
926 CodeGenOpts.DirectAccessExternalData);
928 if (CodeGenOpts.UnwindTables)
929 getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables));
931 switch (CodeGenOpts.getFramePointer()) {
932 case CodeGenOptions::FramePointerKind::None:
933 // 0 ("none") is the default.
934 break;
935 case CodeGenOptions::FramePointerKind::NonLeaf:
936 getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
937 break;
938 case CodeGenOptions::FramePointerKind::All:
939 getModule().setFramePointer(llvm::FramePointerKind::All);
940 break;
943 SimplifyPersonality();
945 if (getCodeGenOpts().EmitDeclMetadata)
946 EmitDeclMetadata();
948 if (getCodeGenOpts().CoverageNotesFile.size() ||
949 getCodeGenOpts().CoverageDataFile.size())
950 EmitCoverageFile();
952 if (CGDebugInfo *DI = getModuleDebugInfo())
953 DI->finalize();
955 if (getCodeGenOpts().EmitVersionIdentMetadata)
956 EmitVersionIdentMetadata();
958 if (!getCodeGenOpts().RecordCommandLine.empty())
959 EmitCommandLineMetadata();
961 if (!getCodeGenOpts().StackProtectorGuard.empty())
962 getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard);
963 if (!getCodeGenOpts().StackProtectorGuardReg.empty())
964 getModule().setStackProtectorGuardReg(
965 getCodeGenOpts().StackProtectorGuardReg);
966 if (!getCodeGenOpts().StackProtectorGuardSymbol.empty())
967 getModule().setStackProtectorGuardSymbol(
968 getCodeGenOpts().StackProtectorGuardSymbol);
969 if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX)
970 getModule().setStackProtectorGuardOffset(
971 getCodeGenOpts().StackProtectorGuardOffset);
972 if (getCodeGenOpts().StackAlignment)
973 getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment);
974 if (getCodeGenOpts().SkipRaxSetup)
975 getModule().addModuleFlag(llvm::Module::Override, "SkipRaxSetup", 1);
977 if (getContext().getTargetInfo().getMaxTLSAlign())
978 getModule().addModuleFlag(llvm::Module::Error, "MaxTLSAlign",
979 getContext().getTargetInfo().getMaxTLSAlign());
981 getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
983 EmitBackendOptionsMetadata(getCodeGenOpts());
985 // If there is device offloading code, embed it in the host now.
986 EmbedObject(&getModule(), CodeGenOpts, getDiags());
988 // Set visibility from DLL storage class.
989 // We do this at the end of LLVM IR generation, after any operation
990 // that might affect the DLL storage class or the visibility, and
991 // before anything that might act on these.
992 setVisibilityFromDLLStorageClass(LangOpts, getModule());
995 void CodeGenModule::EmitOpenCLMetadata() {
996 // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
997 // opencl.ocl.version named metadata node.
998 // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL.
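// Illustrative result: OpenCL 3.0 yields opencl.ocl.version = !{i32 3, i32 0},
// and OpenCL 1.2 yields !{i32 1, i32 2}.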
999 auto Version = LangOpts.getOpenCLCompatibleVersion();
1000 llvm::Metadata *OCLVerElts[] = {
1001 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1002 Int32Ty, Version / 100)),
1003 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1004 Int32Ty, (Version % 100) / 10))};
1005 llvm::NamedMDNode *OCLVerMD =
1006 TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
1007 llvm::LLVMContext &Ctx = TheModule.getContext();
1008 OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
1011 void CodeGenModule::EmitBackendOptionsMetadata(
1012 const CodeGenOptions &CodeGenOpts) {
1013 if (getTriple().isRISCV()) {
1014 getModule().addModuleFlag(llvm::Module::Min, "SmallDataLimit",
1015 CodeGenOpts.SmallDataLimit);
1019 void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
1020 // Make sure that this type is translated.
1021 Types.UpdateCompletedType(TD);
1024 void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
1025 // Make sure that this type is translated.
1026 Types.RefreshTypeCacheForClass(RD);
1029 llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
1030 if (!TBAA)
1031 return nullptr;
1032 return TBAA->getTypeInfo(QTy);
1035 TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
1036 if (!TBAA)
1037 return TBAAAccessInfo();
1038 if (getLangOpts().CUDAIsDevice) {
1039 // As CUDA builtin surface/texture types are replaced, skip generating TBAA
1040 // access info.
1041 if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
1042 if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
1043 nullptr)
1044 return TBAAAccessInfo();
1045 } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
1046 if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
1047 nullptr)
1048 return TBAAAccessInfo();
1051 return TBAA->getAccessInfo(AccessType);
1054 TBAAAccessInfo
1055 CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
1056 if (!TBAA)
1057 return TBAAAccessInfo();
1058 return TBAA->getVTablePtrAccessInfo(VTablePtrType);
1061 llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
1062 if (!TBAA)
1063 return nullptr;
1064 return TBAA->getTBAAStructInfo(QTy);
1067 llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
1068 if (!TBAA)
1069 return nullptr;
1070 return TBAA->getBaseTypeInfo(QTy);
1073 llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
1074 if (!TBAA)
1075 return nullptr;
1076 return TBAA->getAccessTagInfo(Info);
1079 TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
1080 TBAAAccessInfo TargetInfo) {
1081 if (!TBAA)
1082 return TBAAAccessInfo();
1083 return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
1086 TBAAAccessInfo
1087 CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
1088 TBAAAccessInfo InfoB) {
1089 if (!TBAA)
1090 return TBAAAccessInfo();
1091 return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
1094 TBAAAccessInfo
1095 CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
1096 TBAAAccessInfo SrcInfo) {
1097 if (!TBAA)
1098 return TBAAAccessInfo();
1099 return TBAA->mergeTBAAInfoForConditionalOperator(DestInfo, SrcInfo);
1102 void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
1103 TBAAAccessInfo TBAAInfo) {
1104 if (llvm::MDNode *Tag = getTBAAAccessTagInfo(TBAAInfo))
1105 Inst->setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
1108 void CodeGenModule::DecorateInstructionWithInvariantGroup(
1109 llvm::Instruction *I, const CXXRecordDecl *RD) {
1110 I->setMetadata(llvm::LLVMContext::MD_invariant_group,
1111 llvm::MDNode::get(getLLVMContext(), {}));
1114 void CodeGenModule::Error(SourceLocation loc, StringRef message) {
1115 unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
1116 getDiags().Report(Context.getFullLoc(loc), diagID) << message;
1119 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1120 /// specified stmt yet.
1121 void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
1122 unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
1123 "cannot compile this %0 yet");
1124 std::string Msg = Type;
1125 getDiags().Report(Context.getFullLoc(S->getBeginLoc()), DiagID)
1126 << Msg << S->getSourceRange();
1129 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1130 /// specified decl yet.
1131 void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
1132 unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
1133 "cannot compile this %0 yet");
1134 std::string Msg = Type;
1135 getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
1138 llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
1139 return llvm::ConstantInt::get(SizeTy, size.getQuantity());
1142 void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
1143 const NamedDecl *D) const {
1144 // Internal definitions always have default visibility.
1145 if (GV->hasLocalLinkage()) {
1146 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
1147 return;
1149 if (!D)
1150 return;
1151 // Set visibility for definitions, and for declarations if requested globally
1152 // or set explicitly.
1153 LinkageInfo LV = D->getLinkageAndVisibility();
1154 if (GV->hasDLLExportStorageClass() || GV->hasDLLImportStorageClass()) {
1155 // Reject incompatible dllstorage and visibility annotations.
1156 if (!LV.isVisibilityExplicit())
1157 return;
1158 if (GV->hasDLLExportStorageClass()) {
1159 if (LV.getVisibility() == HiddenVisibility)
1160 getDiags().Report(D->getLocation(),
1161 diag::err_hidden_visibility_dllexport);
1162 } else if (LV.getVisibility() != DefaultVisibility) {
1163 getDiags().Report(D->getLocation(),
1164 diag::err_non_default_visibility_dllimport);
1166 return;
1169 if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
1170 !GV->isDeclarationForLinker())
1171 GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
1174 static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
1175 llvm::GlobalValue *GV) {
1176 if (GV->hasLocalLinkage())
1177 return true;
1179 if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
1180 return true;
1182 // DLLImport explicitly marks the GV as external.
1183 if (GV->hasDLLImportStorageClass())
1184 return false;
1186 const llvm::Triple &TT = CGM.getTriple();
1187 if (TT.isWindowsGNUEnvironment()) {
1188 // In MinGW, variables without DLLImport can still be automatically
1189 // imported from a DLL by the linker; don't mark variables that
1190 // potentially could come from another DLL as DSO local.
1192 // With EmulatedTLS, TLS variables can be autoimported from other DLLs
1193 // (and this actually happens in the public interface of libstdc++), so
1194 // such variables can't be marked as DSO local. (Native TLS variables
1195 // can't be dllimported at all, though.)
1196 if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
1197 (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS))
1198 return false;
1201 // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
1202 // remain unresolved in the link, they can be resolved to zero, which is
1203 // outside the current DSO.
1204 if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
1205 return false;
1207 // Every other GV is local on COFF.
1208 // Make an exception for windows OS in the triple: Some firmware builds use
1209 // *-win32-macho triples. This (accidentally?) produced windows relocations
1210 // without GOT tables in older clang versions; keep this behaviour.
1211 // FIXME: even thread local variables?
1212 if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
1213 return true;
1215 // Only handle COFF and ELF for now.
1216 if (!TT.isOSBinFormatELF())
1217 return false;
1219 // If this is not an executable, don't assume anything is local.
1220 const auto &CGOpts = CGM.getCodeGenOpts();
1221 llvm::Reloc::Model RM = CGOpts.RelocationModel;
1222 const auto &LOpts = CGM.getLangOpts();
1223 if (RM != llvm::Reloc::Static && !LOpts.PIE) {
1224 // On ELF, if -fno-semantic-interposition is specified and the target
1225 // supports local aliases, there will be neither CC1
1226 // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
1227 // dso_local on the function if using a local alias is preferable (can avoid
1228 // PLT indirection).
1229 if (!(isa<llvm::Function>(GV) && GV->canBenefitFromLocalAlias()))
1230 return false;
1231 return !(CGM.getLangOpts().SemanticInterposition ||
1232 CGM.getLangOpts().HalfNoSemanticInterposition);
1235 // A definition cannot be preempted from an executable.
1236 if (!GV->isDeclarationForLinker())
1237 return true;
1239 // Most PIC code sequences that assume that a symbol is local cannot produce a
1240 // 0 if it turns out the symbol is undefined. While this is ABI- and
1241 // relocation-dependent, it seems worth it to handle it here.
1242 if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
1243 return false;
1245 // PowerPC64 prefers TOC indirection to avoid copy relocations.
1246 if (TT.isPPC64())
1247 return false;
1249 if (CGOpts.DirectAccessExternalData) {
1250 // If -fdirect-access-external-data (default for -fno-pic), set dso_local
1251 // for non-thread-local variables. If the symbol is not defined in the
1252 // executable, a copy relocation will be needed at link time. dso_local is
1253 // excluded for thread-local variables because they generally don't support
1254 // copy relocations.
1255 if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
1256 if (!Var->isThreadLocal())
1257 return true;
1259 // -fno-pic sets dso_local on a function declaration to allow direct
1260 // accesses when taking its address (similar to a data symbol). If the
1261 // function is not defined in the executable, a canonical PLT entry will be
1262 // needed at link time. -fno-direct-access-external-data can avoid the
1263 // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
1264 // it could just cause trouble without providing perceptible benefits.
1265 if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
1266 return true;
1269 // If we can use copy relocations we can assume it is local.
1271 // Otherwise don't assume it is local.
1272 return false;
1275 void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
1276 GV->setDSOLocal(shouldAssumeDSOLocal(*this, GV));
1279 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1280 GlobalDecl GD) const {
1281 const auto *D = dyn_cast<NamedDecl>(GD.getDecl());
1282 // C++ destructors have a few C++ ABI specific special cases.
1283 if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(D)) {
1284 getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, GD.getDtorType());
1285 return;
1287 setDLLImportDLLExport(GV, D);
1290 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1291 const NamedDecl *D) const {
1292 if (D && D->isExternallyVisible()) {
1293 if (D->hasAttr<DLLImportAttr>())
1294 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
1295 else if ((D->hasAttr<DLLExportAttr>() ||
1296 shouldMapVisibilityToDLLExport(D)) &&
1297 !GV->isDeclarationForLinker())
1298 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
1302 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
1303 GlobalDecl GD) const {
1304 setDLLImportDLLExport(GV, GD);
1305 setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
1308 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
1309 const NamedDecl *D) const {
1310 setDLLImportDLLExport(GV, D);
1311 setGVPropertiesAux(GV, D);
1314 void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
1315 const NamedDecl *D) const {
1316 setGlobalVisibility(GV, D);
1317 setDSOLocal(GV);
1318 GV->setPartition(CodeGenOpts.SymbolPartition);
1321 static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
1322 return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
1323 .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
1324 .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
1325 .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
1326 .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
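// Illustrative use of the attribute strings handled above (hypothetical
// variable):
//   static __thread int counter __attribute__((tls_model("initial-exec")));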
1329 llvm::GlobalVariable::ThreadLocalMode
1330 CodeGenModule::GetDefaultLLVMTLSModel() const {
1331 switch (CodeGenOpts.getDefaultTLSModel()) {
1332 case CodeGenOptions::GeneralDynamicTLSModel:
1333 return llvm::GlobalVariable::GeneralDynamicTLSModel;
1334 case CodeGenOptions::LocalDynamicTLSModel:
1335 return llvm::GlobalVariable::LocalDynamicTLSModel;
1336 case CodeGenOptions::InitialExecTLSModel:
1337 return llvm::GlobalVariable::InitialExecTLSModel;
1338 case CodeGenOptions::LocalExecTLSModel:
1339 return llvm::GlobalVariable::LocalExecTLSModel;
1341 llvm_unreachable("Invalid TLS model!");
1344 void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
1345 assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
1347 llvm::GlobalValue::ThreadLocalMode TLM;
1348 TLM = GetDefaultLLVMTLSModel();
1350 // Override the TLS model if it is explicitly specified.
1351 if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
1352 TLM = GetLLVMTLSModel(Attr->getModel());
1355 GV->setThreadLocalMode(TLM);
1358 static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
1359 StringRef Name) {
1360 const TargetInfo &Target = CGM.getTarget();
1361 return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
1364 static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
1365 const CPUSpecificAttr *Attr,
1366 unsigned CPUIndex,
1367 raw_ostream &Out) {
1368 // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
1369 // supported.
1370 if (Attr)
1371 Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
1372 else if (CGM.getTarget().supportsIFunc())
1373 Out << ".resolver";
1376 static void AppendTargetVersionMangling(const CodeGenModule &CGM,
1377 const TargetVersionAttr *Attr,
1378 raw_ostream &Out) {
1379 if (Attr->isDefaultVersion())
1380 return;
1381 Out << "._";
1382 const TargetInfo &TI = CGM.getTarget();
1383 llvm::SmallVector<StringRef, 8> Feats;
1384 Attr->getFeatures(Feats);
1385 llvm::stable_sort(Feats, [&TI](const StringRef FeatL, const StringRef FeatR) {
1386 return TI.multiVersionSortPriority(FeatL) <
1387 TI.multiVersionSortPriority(FeatR);
1389 for (const auto &Feat : Feats) {
1390 Out << 'M';
1391 Out << Feat;
1395 static void AppendTargetMangling(const CodeGenModule &CGM,
1396 const TargetAttr *Attr, raw_ostream &Out) {
1397 if (Attr->isDefaultVersion())
1398 return;
1400 Out << '.';
1401 const TargetInfo &Target = CGM.getTarget();
1402 ParsedTargetAttr Info = Target.parseTargetAttr(Attr->getFeaturesStr());
1403 llvm::sort(Info.Features, [&Target](StringRef LHS, StringRef RHS) {
1404 // Multiversioning doesn't allow "no-${feature}", so we can
1405 // only have "+" prefixes here.
1406 assert(LHS.startswith("+") && RHS.startswith("+") &&
1407 "Features should always have a prefix.");
1408 return Target.multiVersionSortPriority(LHS.substr(1)) >
1409 Target.multiVersionSortPriority(RHS.substr(1));
1412 bool IsFirst = true;
1414 if (!Info.CPU.empty()) {
1415 IsFirst = false;
1416 Out << "arch_" << Info.CPU;
1419 for (StringRef Feat : Info.Features) {
1420 if (!IsFirst)
1421 Out << '_';
1422 IsFirst = false;
1423 Out << Feat.substr(1);
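// Illustrative mangling (hypothetical declaration): a version declared with
// __attribute__((target("arch=ivybridge,avx"))) gets a suffix like
// ".arch_ivybridge_avx"; feature order follows multiVersionSortPriority().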
1427 // Returns true if GD is a function decl with internal linkage and
1428 // needs a unique suffix after the mangled name.
1429 static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
1430 CodeGenModule &CGM) {
1431 const Decl *D = GD.getDecl();
1432 return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(D) &&
1433 (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
1436 static void AppendTargetClonesMangling(const CodeGenModule &CGM,
1437 const TargetClonesAttr *Attr,
1438 unsigned VersionIndex,
1439 raw_ostream &Out) {
1440 const TargetInfo &TI = CGM.getTarget();
1441 if (TI.getTriple().isAArch64()) {
1442 StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
1443 if (FeatureStr == "default")
1444 return;
1445 Out << "._";
1446 SmallVector<StringRef, 8> Features;
1447 FeatureStr.split(Features, "+");
1448 llvm::stable_sort(Features,
1449 [&TI](const StringRef FeatL, const StringRef FeatR) {
1450 return TI.multiVersionSortPriority(FeatL) <
1451 TI.multiVersionSortPriority(FeatR);
1453 for (auto &Feat : Features) {
1454 Out << 'M';
1455 Out << Feat;
1457 } else {
1458 Out << '.';
1459 StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
1460 if (FeatureStr.startswith("arch="))
1461 Out << "arch_" << FeatureStr.substr(sizeof("arch=") - 1);
1462 else
1463 Out << FeatureStr;
1465 Out << '.' << Attr->getMangledIndex(VersionIndex);
1469 static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
1470 const NamedDecl *ND,
1471 bool OmitMultiVersionMangling = false) {
1472 SmallString<256> Buffer;
1473 llvm::raw_svector_ostream Out(Buffer);
1474 MangleContext &MC = CGM.getCXXABI().getMangleContext();
1475 if (!CGM.getModuleNameHash().empty())
1476 MC.needsUniqueInternalLinkageNames();
1477 bool ShouldMangle = MC.shouldMangleDeclName(ND);
1478 if (ShouldMangle)
1479 MC.mangleName(GD.getWithDecl(ND), Out);
1480 else {
1481 IdentifierInfo *II = ND->getIdentifier();
1482 assert(II && "Attempt to mangle unnamed decl.");
1483 const auto *FD = dyn_cast<FunctionDecl>(ND);
1485 if (FD &&
1486 FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
1487 Out << "__regcall3__" << II->getName();
1488 } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
1489 GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
1490 Out << "__device_stub__" << II->getName();
1491 } else {
1492 Out << II->getName();
1496 // Check if the module name hash should be appended for internal linkage
1497 // symbols. This should come before multi-version target suffixes are
1498 // appended. This is to keep the name and module hash suffix of the
1499 // internal linkage function together. The unique suffix should only be
1500 // added when name mangling is done to make sure that the final name can
1501 // be properly demangled. For example, for C functions without prototypes,
1502 // name mangling is not done and the unique suffix should not be appended
1503 // then.
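// Illustrative example (hypothetical): with -funique-internal-linkage-names,
// an internal-linkage function 'static int foo()' is emitted as "_ZL3foov"
// followed by the module hash postfix computed in the constructor above.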
1504 if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) {
1505 assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames &&
1506 "Hash computed when not explicitly requested");
1507 Out << CGM.getModuleNameHash();
1510 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
1511 if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
1512 switch (FD->getMultiVersionKind()) {
1513 case MultiVersionKind::CPUDispatch:
1514 case MultiVersionKind::CPUSpecific:
1515 AppendCPUSpecificCPUDispatchMangling(CGM,
1516 FD->getAttr<CPUSpecificAttr>(),
1517 GD.getMultiVersionIndex(), Out);
1518 break;
1519 case MultiVersionKind::Target:
1520 AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
1521 break;
1522 case MultiVersionKind::TargetVersion:
1523 AppendTargetVersionMangling(CGM, FD->getAttr<TargetVersionAttr>(), Out);
1524 break;
1525 case MultiVersionKind::TargetClones:
1526 AppendTargetClonesMangling(CGM, FD->getAttr<TargetClonesAttr>(),
1527 GD.getMultiVersionIndex(), Out);
1528 break;
1529 case MultiVersionKind::None:
1530 llvm_unreachable("None multiversion type isn't valid here");
1534 // Make a unique name for the device-side static file-scope variable for HIP.
1535 if (CGM.getContext().shouldExternalize(ND) &&
1536 CGM.getLangOpts().GPURelocatableDeviceCode &&
1537 CGM.getLangOpts().CUDAIsDevice)
1538 CGM.printPostfixForExternalizedDecl(Out, ND);
1540 return std::string(Out.str());
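// Illustrative examples (not part of the original source) of the unmangled
// paths above: a CUDA kernel referenced through its host-side stub is named
// "__device_stub__kernelname", and an x86 regcall function is named
// "__regcall3__funcname". When -funique-internal-linkage-names is in effect,
// the module name hash is appended directly after the mangled name, before
// any multiversion suffix, so the base name and hash stay together.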
1543 void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
1544 const FunctionDecl *FD,
1545 StringRef &CurName) {
1546 if (!FD->isMultiVersion())
1547 return;
1549 // Get the name of what this would be without the 'target' attribute. This
1550 // allows us to look up the version that was emitted when this wasn't a
1551 // multiversion function.
1552 std::string NonTargetName =
1553 getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
1554 GlobalDecl OtherGD;
1555 if (lookupRepresentativeDecl(NonTargetName, OtherGD)) {
1556 assert(OtherGD.getCanonicalDecl()
1557 .getDecl()
1558 ->getAsFunction()
1559 ->isMultiVersion() &&
1560 "Other GD should now be a multiversioned function");
1561 // OtherFD is the version of this function that was mangled BEFORE
1562 // becoming a MultiVersion function. It potentially needs to be updated.
1563 const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
1564 .getDecl()
1565 ->getAsFunction()
1566 ->getMostRecentDecl();
1567 std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
1568 // This is so that if the initial version was already the 'default'
1569 // version, we don't try to update it.
1570 if (OtherName != NonTargetName) {
1571 // Remove instead of erase, since others may have stored the StringRef
1572 // to this.
1573 const auto ExistingRecord = Manglings.find(NonTargetName);
1574 if (ExistingRecord != std::end(Manglings))
1575 Manglings.remove(&(*ExistingRecord));
1576 auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
1577 StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] =
1578 Result.first->first();
1579 // If the current decl is the one being created, make sure we update the name.
1580 if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl())
1581 CurName = OtherNameRef;
1582 if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
1583 Entry->setName(OtherName);
1588 StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
1589 GlobalDecl CanonicalGD = GD.getCanonicalDecl();
1591 // Some ABIs don't have constructor variants. Make sure that base and
1592 // complete constructors get mangled the same.
1593 if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
1594 if (!getTarget().getCXXABI().hasConstructorVariants()) {
1595 CXXCtorType OrigCtorType = GD.getCtorType();
1596 assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
1597 if (OrigCtorType == Ctor_Base)
1598 CanonicalGD = GlobalDecl(CD, Ctor_Complete);
1602 // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
1603 // static device variable depends on whether the variable is referenced by
1604 // a host or a device function. Therefore the mangled name cannot be
1605 // cached.
1606 if (!LangOpts.CUDAIsDevice || !getContext().mayExternalize(GD.getDecl())) {
1607 auto FoundName = MangledDeclNames.find(CanonicalGD);
1608 if (FoundName != MangledDeclNames.end())
1609 return FoundName->second;
1612 // Keep the first result in the case of a mangling collision.
1613 const auto *ND = cast<NamedDecl>(GD.getDecl());
1614 std::string MangledName = getMangledNameImpl(*this, GD, ND);
1616 // Ensure either that host and device compilations use different ABIs (say,
1617 // the host follows the MSVC ABI while the device follows the Itanium C++
1618 // ABI) or, if they follow the same ABI, that kernel names after mangling
1619 // are the same after name stubbing. The latter check is very important, as
1620 // the device kernel name mangled in the host compilation is used to resolve
1621 // the device binaries to be executed. Inconsistent naming results in
1622 // undefined behavior. Even though we cannot compare the naming directly
1623 // between host and device compilations, comparing the host and device
1624 // manglings within the host compilation can help catch certain mismatches.
1625 assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
1626 getContext().shouldExternalize(ND) || getLangOpts().CUDAIsDevice ||
1627 (getContext().getAuxTargetInfo() &&
1628 (getContext().getAuxTargetInfo()->getCXXABI() !=
1629 getContext().getTargetInfo().getCXXABI())) ||
1630 getCUDARuntime().getDeviceSideName(ND) ==
1631 getMangledNameImpl(
1632 *this,
1633 GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
1634 ND));
1636 auto Result = Manglings.insert(std::make_pair(MangledName, GD));
1637 return MangledDeclNames[CanonicalGD] = Result.first->first();
1640 StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
1641 const BlockDecl *BD) {
1642 MangleContext &MangleCtx = getCXXABI().getMangleContext();
1643 const Decl *D = GD.getDecl();
1645 SmallString<256> Buffer;
1646 llvm::raw_svector_ostream Out(Buffer);
1647 if (!D)
1648 MangleCtx.mangleGlobalBlock(BD,
1649 dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
1650 else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
1651 MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
1652 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
1653 MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
1654 else
1655 MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
1657 auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
1658 return Result.first->first();
1661 const GlobalDecl CodeGenModule::getMangledNameDecl(StringRef Name) {
1662 auto it = MangledDeclNames.begin();
1663 while (it != MangledDeclNames.end()) {
1664 if (it->second == Name)
1665 return it->first;
1666 it++;
1668 return GlobalDecl();
1671 llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
1672 return getModule().getNamedValue(Name);
1675 /// AddGlobalCtor - Add a function to the list that will be called before
1676 /// main() runs.
1677 void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
1678 unsigned LexOrder,
1679 llvm::Constant *AssociatedData) {
1680 // FIXME: Type coercion of void()* types.
1681 GlobalCtors.push_back(Structor(Priority, LexOrder, Ctor, AssociatedData));
1684 /// AddGlobalDtor - Add a function to the list that will be called
1685 /// when the module is unloaded.
1686 void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
1687 bool IsDtorAttrFunc) {
1688 if (CodeGenOpts.RegisterGlobalDtorsWithAtExit &&
1689 (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) {
1690 DtorsUsingAtExit[Priority].push_back(Dtor);
1691 return;
1694 // FIXME: Type coercion of void()* types.
1695 GlobalDtors.push_back(Structor(Priority, ~0U, Dtor, nullptr));
1698 void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
1699 if (Fns.empty()) return;
1701 // Ctor function type is void()*.
1702 llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
1703 llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
1704 TheModule.getDataLayout().getProgramAddressSpace());
1706 // Get the type of a ctor entry, { i32, void ()*, i8* }.
1707 llvm::StructType *CtorStructTy = llvm::StructType::get(
1708 Int32Ty, CtorPFTy, VoidPtrTy);
1710 // Construct the constructor and destructor arrays.
1711 ConstantInitBuilder builder(*this);
1712 auto ctors = builder.beginArray(CtorStructTy);
1713 for (const auto &I : Fns) {
1714 auto ctor = ctors.beginStruct(CtorStructTy);
1715 ctor.addInt(Int32Ty, I.Priority);
1716 ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
1717 if (I.AssociatedData)
1718 ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
1719 else
1720 ctor.addNullPointer(VoidPtrTy);
1721 ctor.finishAndAddTo(ctors);
1724 auto list =
1725 ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
1726 /*constant*/ false,
1727 llvm::GlobalValue::AppendingLinkage);
1729 // The LTO linker doesn't seem to like it when we set an alignment
1730 // on appending variables. Take it off as a workaround.
1731 list->setAlignment(std::nullopt);
1733 Fns.clear();
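// Illustrative sketch of the resulting IR (assumed shape, opaque-pointer
// syntax): a single constructor registered at the default priority becomes
//   @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }]
//       [{ i32, ptr, ptr } { i32 65535, ptr @ctor, ptr null }]
// where the third member carries the associated data, if any, and the
// alignment is deliberately left unset as noted above.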
1736 llvm::GlobalValue::LinkageTypes
1737 CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
1738 const auto *D = cast<FunctionDecl>(GD.getDecl());
1740 GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
1742 if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
1743 return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());
1745 if (isa<CXXConstructorDecl>(D) &&
1746 cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
1747 Context.getTargetInfo().getCXXABI().isMicrosoft()) {
1748 // Our approach to inheriting constructors is fundamentally different from
1749 // that used by the MS ABI, so keep our inheriting constructor thunks
1750 // internal rather than trying to pick an unambiguous mangling for them.
1751 return llvm::GlobalValue::InternalLinkage;
1754 return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
1757 llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
1758 llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
1759 if (!MDS) return nullptr;
1761 return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
1764 llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T) {
1765 if (auto *FnType = T->getAs<FunctionProtoType>())
1766 T = getContext().getFunctionType(
1767 FnType->getReturnType(), FnType->getParamTypes(),
1768 FnType->getExtProtoInfo().withExceptionSpec(EST_None));
1770 std::string OutName;
1771 llvm::raw_string_ostream Out(OutName);
1772 getCXXABI().getMangleContext().mangleTypeName(
1773 T, Out, getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);
1775 if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
1776 Out << ".normalized";
1778 return llvm::ConstantInt::get(Int32Ty,
1779 static_cast<uint32_t>(llvm::xxHash64(OutName)));
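// Illustrative sketch (assumed values): for a function of type "void (int)"
// the mangled type name is "_ZTSFviE" (with SanitizeCfiICallNormalizeIntegers
// the integers are mangled in their normalized form and ".normalized" is
// appended); the KCFI type id is then the low 32 bits of xxHash64 over that
// string, emitted as an i32 constant.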
1782 void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
1783 const CGFunctionInfo &Info,
1784 llvm::Function *F, bool IsThunk) {
1785 unsigned CallingConv;
1786 llvm::AttributeList PAL;
1787 ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv,
1788 /*AttrOnCallSite=*/false, IsThunk);
1789 F->setAttributes(PAL);
1790 F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1793 static void removeImageAccessQualifier(std::string& TyName) {
1794 std::string ReadOnlyQual("__read_only");
1795 std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
1796 if (ReadOnlyPos != std::string::npos)
1797 // "+ 1" for the space after access qualifier.
1798 TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
1799 else {
1800 std::string WriteOnlyQual("__write_only");
1801 std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
1802 if (WriteOnlyPos != std::string::npos)
1803 TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
1804 else {
1805 std::string ReadWriteQual("__read_write");
1806 std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
1807 if (ReadWritePos != std::string::npos)
1808 TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
1813 // Returns the address space id that should be emitted in the
1814 // kernel_arg_addr_space metadata. This is always fixed to the ids
1815 // specified in the SPIR 2.0 specification so that, for example, a
1816 // clGetKernelArgInfo() implementation can differentiate between the
1817 // address spaces even on targets without a unique mapping to the OpenCL
1818 // address spaces (basically all single-AS CPUs).
1819 static unsigned ArgInfoAddressSpace(LangAS AS) {
1820 switch (AS) {
1821 case LangAS::opencl_global:
1822 return 1;
1823 case LangAS::opencl_constant:
1824 return 2;
1825 case LangAS::opencl_local:
1826 return 3;
1827 case LangAS::opencl_generic:
1828 return 4; // Not in SPIR 2.0 specs.
1829 case LangAS::opencl_global_device:
1830 return 5;
1831 case LangAS::opencl_global_host:
1832 return 6;
1833 default:
1834 return 0; // Assume private.
1838 void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn,
1839 const FunctionDecl *FD,
1840 CodeGenFunction *CGF) {
1841 assert(((FD && CGF) || (!FD && !CGF)) &&
1842 "Incorrect use - FD and CGF should either be both null or not!");
1843 // Create MDNodes that represent the kernel arg metadata.
1844 // Each MDNode is a list of the form "key" followed by N values, where N is
1845 // the number of kernel arguments.
1847 const PrintingPolicy &Policy = Context.getPrintingPolicy();
1849 // MDNode for the kernel argument address space qualifiers.
1850 SmallVector<llvm::Metadata *, 8> addressQuals;
1852 // MDNode for the kernel argument access qualifiers (images only).
1853 SmallVector<llvm::Metadata *, 8> accessQuals;
1855 // MDNode for the kernel argument type names.
1856 SmallVector<llvm::Metadata *, 8> argTypeNames;
1858 // MDNode for the kernel argument base type names.
1859 SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
1861 // MDNode for the kernel argument type qualifiers.
1862 SmallVector<llvm::Metadata *, 8> argTypeQuals;
1864 // MDNode for the kernel argument names.
1865 SmallVector<llvm::Metadata *, 8> argNames;
1867 if (FD && CGF)
1868 for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
1869 const ParmVarDecl *parm = FD->getParamDecl(i);
1870 // Get argument name.
1871 argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
1873 if (!getLangOpts().OpenCL)
1874 continue;
1875 QualType ty = parm->getType();
1876 std::string typeQuals;
1878 // Get image and pipe access qualifier:
1879 if (ty->isImageType() || ty->isPipeType()) {
1880 const Decl *PDecl = parm;
1881 if (const auto *TD = ty->getAs<TypedefType>())
1882 PDecl = TD->getDecl();
1883 const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
1884 if (A && A->isWriteOnly())
1885 accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
1886 else if (A && A->isReadWrite())
1887 accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
1888 else
1889 accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
1890 } else
1891 accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
1893 auto getTypeSpelling = [&](QualType Ty) {
1894 auto typeName = Ty.getUnqualifiedType().getAsString(Policy);
1896 if (Ty.isCanonical()) {
1897 StringRef typeNameRef = typeName;
1898 // Turn "unsigned type" to "utype"
1899 if (typeNameRef.consume_front("unsigned "))
1900 return std::string("u") + typeNameRef.str();
1901 if (typeNameRef.consume_front("signed "))
1902 return typeNameRef.str();
1905 return typeName;
1908 if (ty->isPointerType()) {
1909 QualType pointeeTy = ty->getPointeeType();
1911 // Get address qualifier.
1912 addressQuals.push_back(
1913 llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
1914 ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));
1916 // Get argument type name.
1917 std::string typeName = getTypeSpelling(pointeeTy) + "*";
1918 std::string baseTypeName =
1919 getTypeSpelling(pointeeTy.getCanonicalType()) + "*";
1920 argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
1921 argBaseTypeNames.push_back(
1922 llvm::MDString::get(VMContext, baseTypeName));
1924 // Get argument type qualifiers:
1925 if (ty.isRestrictQualified())
1926 typeQuals = "restrict";
1927 if (pointeeTy.isConstQualified() ||
1928 (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
1929 typeQuals += typeQuals.empty() ? "const" : " const";
1930 if (pointeeTy.isVolatileQualified())
1931 typeQuals += typeQuals.empty() ? "volatile" : " volatile";
1932 } else {
1933 uint32_t AddrSpc = 0;
1934 bool isPipe = ty->isPipeType();
1935 if (ty->isImageType() || isPipe)
1936 AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);
1938 addressQuals.push_back(
1939 llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));
1941 // Get argument type name.
1942 ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty;
1943 std::string typeName = getTypeSpelling(ty);
1944 std::string baseTypeName = getTypeSpelling(ty.getCanonicalType());
1946 // Remove access qualifiers on images
1947 // (they are inseparable from the type in the clang implementation,
1948 // but the OpenCL spec provides a special query to get the access qualifier
1949 // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
1950 if (ty->isImageType()) {
1951 removeImageAccessQualifier(typeName);
1952 removeImageAccessQualifier(baseTypeName);
1955 argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
1956 argBaseTypeNames.push_back(
1957 llvm::MDString::get(VMContext, baseTypeName));
1959 if (isPipe)
1960 typeQuals = "pipe";
1962 argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
1965 if (getLangOpts().OpenCL) {
1966 Fn->setMetadata("kernel_arg_addr_space",
1967 llvm::MDNode::get(VMContext, addressQuals));
1968 Fn->setMetadata("kernel_arg_access_qual",
1969 llvm::MDNode::get(VMContext, accessQuals));
1970 Fn->setMetadata("kernel_arg_type",
1971 llvm::MDNode::get(VMContext, argTypeNames));
1972 Fn->setMetadata("kernel_arg_base_type",
1973 llvm::MDNode::get(VMContext, argBaseTypeNames));
1974 Fn->setMetadata("kernel_arg_type_qual",
1975 llvm::MDNode::get(VMContext, argTypeQuals));
1977 if (getCodeGenOpts().EmitOpenCLArgMetadata ||
1978 getCodeGenOpts().HIPSaveKernelArgName)
1979 Fn->setMetadata("kernel_arg_name",
1980 llvm::MDNode::get(VMContext, argNames));
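// Illustrative sketch of the emitted metadata (assumed output, abbreviated):
// for an OpenCL kernel
//   kernel void k(global int *in, read_only image2d_t img);
// the function gets roughly
//   !kernel_arg_addr_space !{i32 1, i32 1}
//   !kernel_arg_access_qual !{!"none", !"read_only"}
//   !kernel_arg_type !{!"int*", !"image2d_t"}
//   !kernel_arg_type_qual !{!"", !""}
// with !kernel_arg_name added only under -cl-kernel-arg-info or when HIP
// kernel argument names are requested.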
1983 /// Determines whether the language options require us to model
1984 /// unwind exceptions. We treat -fexceptions as mandating this
1985 /// except under the fragile ObjC ABI with only ObjC exceptions
1986 /// enabled. This means, for example, that C with -fexceptions
1987 /// enables this.
1988 static bool hasUnwindExceptions(const LangOptions &LangOpts) {
1989 // If exceptions are completely disabled, obviously this is false.
1990 if (!LangOpts.Exceptions) return false;
1992 // If C++ exceptions are enabled, this is true.
1993 if (LangOpts.CXXExceptions) return true;
1995 // If ObjC exceptions are enabled, this depends on the ABI.
1996 if (LangOpts.ObjCExceptions) {
1997 return LangOpts.ObjCRuntime.hasUnwindExceptions();
2000 return true;
2003 static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
2004 const CXXMethodDecl *MD) {
2005 // Check that the type metadata can ever actually be used by a call.
2006 if (!CGM.getCodeGenOpts().LTOUnit ||
2007 !CGM.HasHiddenLTOVisibility(MD->getParent()))
2008 return false;
2010 // Only functions whose address can be taken with a member function pointer
2011 // need this sort of type metadata.
2012 return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
2013 !isa<CXXDestructorDecl>(MD);
2016 std::vector<const CXXRecordDecl *>
2017 CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
2018 llvm::SetVector<const CXXRecordDecl *> MostBases;
2020 std::function<void (const CXXRecordDecl *)> CollectMostBases;
2021 CollectMostBases = [&](const CXXRecordDecl *RD) {
2022 if (RD->getNumBases() == 0)
2023 MostBases.insert(RD);
2024 for (const CXXBaseSpecifier &B : RD->bases())
2025 CollectMostBases(B.getType()->getAsCXXRecordDecl());
2027 CollectMostBases(RD);
2028 return MostBases.takeVector();
2031 llvm::GlobalVariable *
2032 CodeGenModule::GetOrCreateRTTIProxyGlobalVariable(llvm::Constant *Addr) {
2033 auto It = RTTIProxyMap.find(Addr);
2034 if (It != RTTIProxyMap.end())
2035 return It->second;
2037 auto *FTRTTIProxy = new llvm::GlobalVariable(
2038 TheModule, Addr->getType(),
2039 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Addr,
2040 "__llvm_rtti_proxy");
2041 FTRTTIProxy->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2043 RTTIProxyMap[Addr] = FTRTTIProxy;
2044 return FTRTTIProxy;
2047 void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
2048 llvm::Function *F) {
2049 llvm::AttrBuilder B(F->getContext());
2051 if ((!D || !D->hasAttr<NoUwtableAttr>()) && CodeGenOpts.UnwindTables)
2052 B.addUWTableAttr(llvm::UWTableKind(CodeGenOpts.UnwindTables));
2054 if (CodeGenOpts.StackClashProtector)
2055 B.addAttribute("probe-stack", "inline-asm");
2057 if (!hasUnwindExceptions(LangOpts))
2058 B.addAttribute(llvm::Attribute::NoUnwind);
2060 if (D && D->hasAttr<NoStackProtectorAttr>())
2061 ; // Do nothing.
2062 else if (D && D->hasAttr<StrictGuardStackCheckAttr>() &&
2063 LangOpts.getStackProtector() == LangOptions::SSPOn)
2064 B.addAttribute(llvm::Attribute::StackProtectStrong);
2065 else if (LangOpts.getStackProtector() == LangOptions::SSPOn)
2066 B.addAttribute(llvm::Attribute::StackProtect);
2067 else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
2068 B.addAttribute(llvm::Attribute::StackProtectStrong);
2069 else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
2070 B.addAttribute(llvm::Attribute::StackProtectReq);
2072 if (!D) {
2073 // If we don't have a declaration to control inlining, the function isn't
2074 // explicitly marked as alwaysinline for semantic reasons, and inlining is
2075 // disabled, mark the function as noinline.
2076 if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
2077 CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
2078 B.addAttribute(llvm::Attribute::NoInline);
2080 F->addFnAttrs(B);
2081 return;
2084 // Track whether we need to add the optnone LLVM attribute,
2085 // starting with the default for this optimization level.
2086 bool ShouldAddOptNone =
2087 !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
2088 // We can't add optnone in the following cases, it won't pass the verifier.
2089 ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
2090 ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();
2092 // Add optnone, but do so only if the function isn't always_inline.
2093 if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) &&
2094 !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
2095 B.addAttribute(llvm::Attribute::OptimizeNone);
2097 // OptimizeNone implies noinline; we should not be inlining such functions.
2098 B.addAttribute(llvm::Attribute::NoInline);
2100 // We still need to handle naked functions even though optnone subsumes
2101 // much of their semantics.
2102 if (D->hasAttr<NakedAttr>())
2103 B.addAttribute(llvm::Attribute::Naked);
2105 // OptimizeNone wins over OptimizeForSize and MinSize.
2106 F->removeFnAttr(llvm::Attribute::OptimizeForSize);
2107 F->removeFnAttr(llvm::Attribute::MinSize);
2108 } else if (D->hasAttr<NakedAttr>()) {
2109 // Naked implies noinline: we should not be inlining such functions.
2110 B.addAttribute(llvm::Attribute::Naked);
2111 B.addAttribute(llvm::Attribute::NoInline);
2112 } else if (D->hasAttr<NoDuplicateAttr>()) {
2113 B.addAttribute(llvm::Attribute::NoDuplicate);
2114 } else if (D->hasAttr<NoInlineAttr>() && !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
2115 // Add noinline if the function isn't always_inline.
2116 B.addAttribute(llvm::Attribute::NoInline);
2117 } else if (D->hasAttr<AlwaysInlineAttr>() &&
2118 !F->hasFnAttribute(llvm::Attribute::NoInline)) {
2119 // (noinline wins over always_inline, and we can't specify both in IR)
2120 B.addAttribute(llvm::Attribute::AlwaysInline);
2121 } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
2122 // If we're not inlining, then force everything that isn't always_inline to
2123 // carry an explicit noinline attribute.
2124 if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline))
2125 B.addAttribute(llvm::Attribute::NoInline);
2126 } else {
2127 // Otherwise, propagate the inline hint attribute and potentially use its
2128 // absence to mark things as noinline.
2129 if (auto *FD = dyn_cast<FunctionDecl>(D)) {
2130 // Search function and template pattern redeclarations for inline.
2131 auto CheckForInline = [](const FunctionDecl *FD) {
2132 auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
2133 return Redecl->isInlineSpecified();
2135 if (any_of(FD->redecls(), CheckRedeclForInline))
2136 return true;
2137 const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
2138 if (!Pattern)
2139 return false;
2140 return any_of(Pattern->redecls(), CheckRedeclForInline);
2142 if (CheckForInline(FD)) {
2143 B.addAttribute(llvm::Attribute::InlineHint);
2144 } else if (CodeGenOpts.getInlining() ==
2145 CodeGenOptions::OnlyHintInlining &&
2146 !FD->isInlined() &&
2147 !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
2148 B.addAttribute(llvm::Attribute::NoInline);
2153 // Add other optimization related attributes if we are optimizing this
2154 // function.
2155 if (!D->hasAttr<OptimizeNoneAttr>()) {
2156 if (D->hasAttr<ColdAttr>()) {
2157 if (!ShouldAddOptNone)
2158 B.addAttribute(llvm::Attribute::OptimizeForSize);
2159 B.addAttribute(llvm::Attribute::Cold);
2161 if (D->hasAttr<HotAttr>())
2162 B.addAttribute(llvm::Attribute::Hot);
2163 if (D->hasAttr<MinSizeAttr>())
2164 B.addAttribute(llvm::Attribute::MinSize);
2167 F->addFnAttrs(B);
2169 unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
2170 if (alignment)
2171 F->setAlignment(llvm::Align(alignment));
2173 if (!D->hasAttr<AlignedAttr>())
2174 if (LangOpts.FunctionAlignment)
2175 F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment));
2177 // Some C++ ABIs require 2-byte alignment for member functions, in order to
2178 // reserve a bit for differentiating between virtual and non-virtual member
2179 // functions. If the current target's C++ ABI requires this and this is a
2180 // member function, set its alignment accordingly.
2181 if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
2182 if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
2183 F->setAlignment(llvm::Align(2));
2186 // In the cross-dso CFI mode with canonical jump tables, we want !type
2187 // attributes on definitions only.
2188 if (CodeGenOpts.SanitizeCfiCrossDso &&
2189 CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
2190 if (auto *FD = dyn_cast<FunctionDecl>(D)) {
2191 // Skip available_externally functions. They won't be codegen'ed in the
2192 // current module anyway.
2193 if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
2194 CreateFunctionTypeMetadataForIcall(FD, F);
2198 // Emit type metadata on member functions for member function pointer checks.
2199 // These are only ever necessary on definitions; we're guaranteed that the
2200 // definition will be present in the LTO unit as a result of LTO visibility.
2201 auto *MD = dyn_cast<CXXMethodDecl>(D);
2202 if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
2203 for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
2204 llvm::Metadata *Id =
2205 CreateMetadataIdentifierForType(Context.getMemberPointerType(
2206 MD->getType(), Context.getRecordType(Base).getTypePtr()));
2207 F->addTypeMetadata(0, Id);
2212 void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
2213 llvm::Function *F) {
2214 if (D->hasAttr<StrictFPAttr>()) {
2215 llvm::AttrBuilder FuncAttrs(F->getContext());
2216 FuncAttrs.addAttribute("strictfp");
2217 F->addFnAttrs(FuncAttrs);
2221 void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
2222 const Decl *D = GD.getDecl();
2223 if (isa_and_nonnull<NamedDecl>(D))
2224 setGVProperties(GV, GD);
2225 else
2226 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
2228 if (D && D->hasAttr<UsedAttr>())
2229 addUsedOrCompilerUsedGlobal(GV);
2231 if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
2232 const auto *VD = cast<VarDecl>(D);
2233 if (VD->getType().isConstQualified() &&
2234 VD->getStorageDuration() == SD_Static)
2235 addUsedOrCompilerUsedGlobal(GV);
2239 bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
2240 llvm::AttrBuilder &Attrs,
2241 bool SetTargetFeatures) {
2242 // Add target-cpu and target-features attributes to functions. If
2243 // we have a decl for the function and it has a target attribute then
2244 // parse that and add it to the feature set.
2245 StringRef TargetCPU = getTarget().getTargetOpts().CPU;
2246 StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
2247 std::vector<std::string> Features;
2248 const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
2249 FD = FD ? FD->getMostRecentDecl() : FD;
2250 const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
2251 const auto *TV = FD ? FD->getAttr<TargetVersionAttr>() : nullptr;
2252 assert((!TD || !TV) && "both target_version and target specified");
2253 const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
2254 const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr;
2255 bool AddedAttr = false;
2256 if (TD || TV || SD || TC) {
2257 llvm::StringMap<bool> FeatureMap;
2258 getContext().getFunctionFeatureMap(FeatureMap, GD);
2260 // Produce the canonical string for this set of features.
2261 for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
2262 Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());
2264 // Now add the target-cpu and target-features to the function.
2265 // While we populated the feature map above, we still need to
2266 // get and parse the target attribute so we can get the cpu for
2267 // the function.
2268 if (TD) {
2269 ParsedTargetAttr ParsedAttr =
2270 Target.parseTargetAttr(TD->getFeaturesStr());
2271 if (!ParsedAttr.CPU.empty() &&
2272 getTarget().isValidCPUName(ParsedAttr.CPU)) {
2273 TargetCPU = ParsedAttr.CPU;
2274 TuneCPU = ""; // Clear the tune CPU.
2276 if (!ParsedAttr.Tune.empty() &&
2277 getTarget().isValidCPUName(ParsedAttr.Tune))
2278 TuneCPU = ParsedAttr.Tune;
2281 if (SD) {
2282 // Apply the given CPU name as the 'tune-cpu' so that the optimizer can
2283 // favor this processor.
2284 TuneCPU = getTarget().getCPUSpecificTuneName(
2285 SD->getCPUName(GD.getMultiVersionIndex())->getName());
2287 } else {
2288 // Otherwise just add the existing target cpu and target features to the
2289 // function.
2290 Features = getTarget().getTargetOpts().Features;
2293 if (!TargetCPU.empty()) {
2294 Attrs.addAttribute("target-cpu", TargetCPU);
2295 AddedAttr = true;
2297 if (!TuneCPU.empty()) {
2298 Attrs.addAttribute("tune-cpu", TuneCPU);
2299 AddedAttr = true;
2301 if (!Features.empty() && SetTargetFeatures) {
2302 llvm::sort(Features);
2303 Attrs.addAttribute("target-features", llvm::join(Features, ","));
2304 AddedAttr = true;
2307 return AddedAttr;
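// Illustrative sketch (assumed attribute values): on x86-64 the resulting IR
// function attributes look roughly like
//   "target-cpu"="x86-64" "tune-cpu"="generic"
//   "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87"
// with target/target_version/cpu_specific/target_clones attributes on the
// declaration overriding the module-level CPU and feature set as shown above.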
2310 void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
2311 llvm::GlobalObject *GO) {
2312 const Decl *D = GD.getDecl();
2313 SetCommonAttributes(GD, GO);
2315 if (D) {
2316 if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
2317 if (D->hasAttr<RetainAttr>())
2318 addUsedGlobal(GV);
2319 if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
2320 GV->addAttribute("bss-section", SA->getName());
2321 if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
2322 GV->addAttribute("data-section", SA->getName());
2323 if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
2324 GV->addAttribute("rodata-section", SA->getName());
2325 if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
2326 GV->addAttribute("relro-section", SA->getName());
2329 if (auto *F = dyn_cast<llvm::Function>(GO)) {
2330 if (D->hasAttr<RetainAttr>())
2331 addUsedGlobal(F);
2332 if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
2333 if (!D->getAttr<SectionAttr>())
2334 F->addFnAttr("implicit-section-name", SA->getName());
2336 llvm::AttrBuilder Attrs(F->getContext());
2337 if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
2338 // We know that GetCPUAndFeaturesAttributes will always have the
2339 // newest set, since it has the newest possible FunctionDecl, so the
2340 // new ones should replace the old.
2341 llvm::AttributeMask RemoveAttrs;
2342 RemoveAttrs.addAttribute("target-cpu");
2343 RemoveAttrs.addAttribute("target-features");
2344 RemoveAttrs.addAttribute("tune-cpu");
2345 F->removeFnAttrs(RemoveAttrs);
2346 F->addFnAttrs(Attrs);
2350 if (const auto *CSA = D->getAttr<CodeSegAttr>())
2351 GO->setSection(CSA->getName());
2352 else if (const auto *SA = D->getAttr<SectionAttr>())
2353 GO->setSection(SA->getName());
2356 getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
2359 void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
2360 llvm::Function *F,
2361 const CGFunctionInfo &FI) {
2362 const Decl *D = GD.getDecl();
2363 SetLLVMFunctionAttributes(GD, FI, F, /*IsThunk=*/false);
2364 SetLLVMFunctionAttributesForDefinition(D, F);
2366 F->setLinkage(llvm::Function::InternalLinkage);
2368 setNonAliasAttributes(GD, F);
2371 static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
2372 // Set linkage and visibility in case we never see a definition.
2373 LinkageInfo LV = ND->getLinkageAndVisibility();
2374 // Don't set internal linkage on declarations.
2375 // "extern_weak" is overloaded in LLVM; we probably should have
2376 // separate linkage types for this.
2377 if (isExternallyVisible(LV.getLinkage()) &&
2378 (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
2379 GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
2382 void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
2383 llvm::Function *F) {
2384 // Only if we are checking indirect calls.
2385 if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
2386 return;
2388 // Non-static class methods are handled via vtable or member function pointer
2389 // checks elsewhere.
2390 if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
2391 return;
2393 llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
2394 F->addTypeMetadata(0, MD);
2395 F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
2397 // Emit a hash-based bit set entry for cross-DSO calls.
2398 if (CodeGenOpts.SanitizeCfiCrossDso)
2399 if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
2400 F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
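// Illustrative sketch (assumed IR shape): under -fsanitize=cfi-icall a
// definition of "void f(int)" carries type metadata roughly like
//   define void @f(i32 %0) !type !0 !type !1 { ... }
//   !0 = !{i64 0, !"_ZTSFviE"}
//   !1 = !{i64 0, !"_ZTSFviE.generalized"}
// plus a hash-based integer type id entry in the cross-DSO case handled above.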
2403 void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) {
2404 if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
2405 return;
2407 llvm::LLVMContext &Ctx = F->getContext();
2408 llvm::MDBuilder MDB(Ctx);
2409 F->setMetadata(llvm::LLVMContext::MD_kcfi_type,
2410 llvm::MDNode::get(
2411 Ctx, MDB.createConstant(CreateKCFITypeId(FD->getType()))));
2414 static bool allowKCFIIdentifier(StringRef Name) {
2415 // KCFI type identifier constants are only necessary for external assembly
2416 // functions, which means it's safe to skip unusual names. Subset of
2417 // MCAsmInfo::isAcceptableChar() and MCAsmInfoXCOFF::isAcceptableChar().
2418 return llvm::all_of(Name, [](const char &C) {
2419 return llvm::isAlnum(C) || C == '_' || C == '.';
2423 void CodeGenModule::finalizeKCFITypes() {
2424 llvm::Module &M = getModule();
2425 for (auto &F : M.functions()) {
2426 // Remove KCFI type metadata from non-address-taken local functions.
2427 bool AddressTaken = F.hasAddressTaken();
2428 if (!AddressTaken && F.hasLocalLinkage())
2429 F.eraseMetadata(llvm::LLVMContext::MD_kcfi_type);
2431 // Generate a constant with the expected KCFI type identifier for all
2432 // address-taken function declarations to support annotating indirectly
2433 // called assembly functions.
2434 if (!AddressTaken || !F.isDeclaration())
2435 continue;
2437 const llvm::ConstantInt *Type;
2438 if (const llvm::MDNode *MD = F.getMetadata(llvm::LLVMContext::MD_kcfi_type))
2439 Type = llvm::mdconst::extract<llvm::ConstantInt>(MD->getOperand(0));
2440 else
2441 continue;
2443 StringRef Name = F.getName();
2444 if (!allowKCFIIdentifier(Name))
2445 continue;
2447 std::string Asm = (".weak __kcfi_typeid_" + Name + "\n.set __kcfi_typeid_" +
2448 Name + ", " + Twine(Type->getZExtValue()) + "\n")
2449 .str();
2450 M.appendModuleInlineAsm(Asm);
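// Illustrative sketch grounded in the string built above: for an address-taken
// declaration such as @memcpy with a KCFI type id of 12345678 this appends
//   .weak __kcfi_typeid_memcpy
//   .set __kcfi_typeid_memcpy, 12345678
// so that assembly implementations can annotate themselves with the expected
// identifier.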
2454 void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
2455 bool IsIncompleteFunction,
2456 bool IsThunk) {
2458 if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
2459 // If this is an intrinsic function, set the function's attributes
2460 // to the intrinsic's attributes.
2461 F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
2462 return;
2465 const auto *FD = cast<FunctionDecl>(GD.getDecl());
2467 if (!IsIncompleteFunction)
2468 SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F,
2469 IsThunk);
2471 // Add the Returned attribute for "this", except for iOS 5 and earlier
2472 // where substantial code, including the libstdc++ dylib, was compiled with
2473 // GCC and does not actually return "this".
2474 if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
2475 !(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
2476 assert(!F->arg_empty() &&
2477 F->arg_begin()->getType()
2478 ->canLosslesslyBitCastTo(F->getReturnType()) &&
2479 "unexpected this return");
2480 F->addParamAttr(0, llvm::Attribute::Returned);
2483 // Only a few attributes are set on declarations; these may later be
2484 // overridden by a definition.
2486 setLinkageForGV(F, FD);
2487 setGVProperties(F, FD);
2489 // Setup target-specific attributes.
2490 if (!IsIncompleteFunction && F->isDeclaration())
2491 getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
2493 if (const auto *CSA = FD->getAttr<CodeSegAttr>())
2494 F->setSection(CSA->getName());
2495 else if (const auto *SA = FD->getAttr<SectionAttr>())
2496 F->setSection(SA->getName());
2498 if (const auto *EA = FD->getAttr<ErrorAttr>()) {
2499 if (EA->isError())
2500 F->addFnAttr("dontcall-error", EA->getUserDiagnostic());
2501 else if (EA->isWarning())
2502 F->addFnAttr("dontcall-warn", EA->getUserDiagnostic());
2505 // If we plan on emitting this inline builtin, we can't treat it as a builtin.
2506 if (FD->isInlineBuiltinDeclaration()) {
2507 const FunctionDecl *FDBody;
2508 bool HasBody = FD->hasBody(FDBody);
2509 (void)HasBody;
2510 assert(HasBody && "Inline builtin declarations should always have an "
2511 "available body!");
2512 if (shouldEmitFunction(FDBody))
2513 F->addFnAttr(llvm::Attribute::NoBuiltin);
2516 if (FD->isReplaceableGlobalAllocationFunction()) {
2517 // A replaceable global allocation function does not act like a builtin by
2518 // default, only if it is invoked by a new-expression or delete-expression.
2519 F->addFnAttr(llvm::Attribute::NoBuiltin);
2522 if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
2523 F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2524 else if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
2525 if (MD->isVirtual())
2526 F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2528 // Don't emit entries for function declarations in the cross-DSO mode. This
2529 // is handled with better precision by the receiving DSO. But if jump tables
2530 // are non-canonical then we need type metadata in order to produce the local
2531 // jump table.
2532 if (!CodeGenOpts.SanitizeCfiCrossDso ||
2533 !CodeGenOpts.SanitizeCfiCanonicalJumpTables)
2534 CreateFunctionTypeMetadataForIcall(FD, F);
2536 if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
2537 setKCFIType(FD, F);
2539 if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
2540 getOpenMPRuntime().emitDeclareSimdFunction(FD, F);
2542 if (CodeGenOpts.InlineMaxStackSize != UINT_MAX)
2543 F->addFnAttr("inline-max-stacksize", llvm::utostr(CodeGenOpts.InlineMaxStackSize));
2545 if (const auto *CB = FD->getAttr<CallbackAttr>()) {
2546 // Annotate the callback behavior as metadata:
2547 // - The callback callee (as argument number).
2548 // - The callback payloads (as argument numbers).
2549 llvm::LLVMContext &Ctx = F->getContext();
2550 llvm::MDBuilder MDB(Ctx);
2552 // The payload indices are all but the first one in the encoding. The first
2553 // identifies the callback callee.
2554 int CalleeIdx = *CB->encoding_begin();
2555 ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
2556 F->addMetadata(llvm::LLVMContext::MD_callback,
2557 *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
2558 CalleeIdx, PayloadIndices,
2559 /* VarArgsArePassed */ false)}));
2563 void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
2564 assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
2565 "Only globals with definition can force usage.");
2566 LLVMUsed.emplace_back(GV);
2569 void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
2570 assert(!GV->isDeclaration() &&
2571 "Only globals with definition can force usage.");
2572 LLVMCompilerUsed.emplace_back(GV);
2575 void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) {
2576 assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
2577 "Only globals with definition can force usage.");
2578 if (getTriple().isOSBinFormatELF())
2579 LLVMCompilerUsed.emplace_back(GV);
2580 else
2581 LLVMUsed.emplace_back(GV);
2584 static void emitUsed(CodeGenModule &CGM, StringRef Name,
2585 std::vector<llvm::WeakTrackingVH> &List) {
2586 // Don't create llvm.used if there is no need.
2587 if (List.empty())
2588 return;
2590 // Convert List to what ConstantArray needs.
2591 SmallVector<llvm::Constant*, 8> UsedArray;
2592 UsedArray.resize(List.size());
2593 for (unsigned i = 0, e = List.size(); i != e; ++i) {
2594 UsedArray[i] =
2595 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2596 cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
2599 if (UsedArray.empty())
2600 return;
2601 llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
2603 auto *GV = new llvm::GlobalVariable(
2604 CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
2605 llvm::ConstantArray::get(ATy, UsedArray), Name);
2607 GV->setSection("llvm.metadata");
2610 void CodeGenModule::emitLLVMUsed() {
2611 emitUsed(*this, "llvm.used", LLVMUsed);
2612 emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
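// Illustrative sketch of the resulting IR (assumed shape, opaque-pointer
// syntax): a single retained definition becomes
//   @llvm.used = appending global [1 x ptr] [ptr @keep_me], section "llvm.metadata"
// and likewise for @llvm.compiler.used, which addUsedOrCompilerUsedGlobal()
// above prefers on ELF targets.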
2615 void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
2616 auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
2617 LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
2620 void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
2621 llvm::SmallString<32> Opt;
2622 getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
2623 if (Opt.empty())
2624 return;
2625 auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2626 LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
2629 void CodeGenModule::AddDependentLib(StringRef Lib) {
2630 auto &C = getLLVMContext();
2631 if (getTarget().getTriple().isOSBinFormatELF()) {
2632 ELFDependentLibraries.push_back(
2633 llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
2634 return;
2637 llvm::SmallString<24> Opt;
2638 getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
2639 auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2640 LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
2643 /// Add link options implied by the given module, including modules
2644 /// it depends on, using a postorder walk.
2645 static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
2646 SmallVectorImpl<llvm::MDNode *> &Metadata,
2647 llvm::SmallPtrSet<Module *, 16> &Visited) {
2648 // Import this module's parent.
2649 if (Mod->Parent && Visited.insert(Mod->Parent).second) {
2650 addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
2653 // Import this module's dependencies.
2654 for (Module *Import : llvm::reverse(Mod->Imports)) {
2655 if (Visited.insert(Import).second)
2656 addLinkOptionsPostorder(CGM, Import, Metadata, Visited);
2659 // Add linker options to link against the libraries/frameworks
2660 // described by this module.
2661 llvm::LLVMContext &Context = CGM.getLLVMContext();
2662 bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();
2664 // For modules that use export_as for linking, use that module
2665 // name instead.
2666 if (Mod->UseExportAsModuleLinkName)
2667 return;
2669 for (const Module::LinkLibrary &LL : llvm::reverse(Mod->LinkLibraries)) {
2670 // Link against a framework. Frameworks are currently Darwin only, so we
2671 // don't need to ask TargetCodeGenInfo for the spelling of the linker option.
2672 if (LL.IsFramework) {
2673 llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
2674 llvm::MDString::get(Context, LL.Library)};
2676 Metadata.push_back(llvm::MDNode::get(Context, Args));
2677 continue;
2680 // Link against a library.
2681 if (IsELF) {
2682 llvm::Metadata *Args[2] = {
2683 llvm::MDString::get(Context, "lib"),
2684 llvm::MDString::get(Context, LL.Library),
2686 Metadata.push_back(llvm::MDNode::get(Context, Args));
2687 } else {
2688 llvm::SmallString<24> Opt;
2689 CGM.getTargetCodeGenInfo().getDependentLibraryOption(LL.Library, Opt);
2690 auto *OptString = llvm::MDString::get(Context, Opt);
2691 Metadata.push_back(llvm::MDNode::get(Context, OptString));
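// Illustrative sketch (assumed operand shapes): the nodes collected here end
// up under the named metadata emitted by EmitModuleLinkOptions() below, e.g.
//   !llvm.linker.options = !{!0, !1}
//   !0 = !{!"-framework", !"Cocoa"}   ; Darwin framework
//   !1 = !{!"lib", !"z"}              ; ELF dependent library
// while on other object formats the library is spelled via
// getDependentLibraryOption(), e.g. "/DEFAULTLIB:z.lib" on COFF.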
2696 void CodeGenModule::EmitModuleInitializers(clang::Module *Primary) {
2697 // Emit the initializers in the order that sub-modules appear in the
2698 // source: first the Global Module Fragment, if present.
2699 if (auto GMF = Primary->getGlobalModuleFragment()) {
2700 for (Decl *D : getContext().getModuleInitializers(GMF)) {
2701 if (isa<ImportDecl>(D))
2702 continue;
2703 assert(isa<VarDecl>(D) && "GMF initializer decl is not a var?");
2704 EmitTopLevelDecl(D);
2707 // Second, any initializers associated with the module itself.
2708 for (Decl *D : getContext().getModuleInitializers(Primary)) {
2709 // Skip import decls, the inits for those are called explicitly.
2710 if (isa<ImportDecl>(D))
2711 continue;
2712 EmitTopLevelDecl(D);
2714 // Third, any initializers associated with the Private Module Fragment, if present.
2715 if (auto PMF = Primary->getPrivateModuleFragment()) {
2716 for (Decl *D : getContext().getModuleInitializers(PMF)) {
2717 assert(isa<VarDecl>(D) && "PMF initializer decl is not a var?");
2718 EmitTopLevelDecl(D);
2723 void CodeGenModule::EmitModuleLinkOptions() {
2724 // Collect the set of all of the modules we want to visit to emit link
2725 // options, which is essentially the imported modules and all of their
2726 // non-explicit child modules.
2727 llvm::SetVector<clang::Module *> LinkModules;
2728 llvm::SmallPtrSet<clang::Module *, 16> Visited;
2729 SmallVector<clang::Module *, 16> Stack;
2731 // Seed the stack with imported modules.
2732 for (Module *M : ImportedModules) {
2733 // Do not add any link flags when an implementation TU of a module imports
2734 // a header of that same module.
2735 if (M->getTopLevelModuleName() == getLangOpts().CurrentModule &&
2736 !getLangOpts().isCompilingModule())
2737 continue;
2738 if (Visited.insert(M).second)
2739 Stack.push_back(M);
2742 // Find all of the modules to import, making a little effort to prune
2743 // non-leaf modules.
2744 while (!Stack.empty()) {
2745 clang::Module *Mod = Stack.pop_back_val();
2747 bool AnyChildren = false;
2749 // Visit the submodules of this module.
2750 for (const auto &SM : Mod->submodules()) {
2751 // Skip explicit children; they need to be explicitly imported to be
2752 // linked against.
2753 if (SM->IsExplicit)
2754 continue;
2756 if (Visited.insert(SM).second) {
2757 Stack.push_back(SM);
2758 AnyChildren = true;
2762 // We didn't find any children, so add this module to the list of
2763 // modules to link against.
2764 if (!AnyChildren) {
2765 LinkModules.insert(Mod);
2769 // Add link options for all of the imported modules in reverse topological
2770 // order. We don't do anything to try to order import link flags with respect
2771 // to linker options inserted by things like #pragma comment().
2772 SmallVector<llvm::MDNode *, 16> MetadataArgs;
2773 Visited.clear();
2774 for (Module *M : LinkModules)
2775 if (Visited.insert(M).second)
2776 addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
2777 std::reverse(MetadataArgs.begin(), MetadataArgs.end());
2778 LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());
2780 // Add the linker options metadata flag.
2781 auto *NMD = getModule().getOrInsertNamedMetadata("llvm.linker.options");
2782 for (auto *MD : LinkerOptionsMetadata)
2783 NMD->addOperand(MD);
2786 void CodeGenModule::EmitDeferred() {
2787 // Emit deferred declare target declarations.
2788 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
2789 getOpenMPRuntime().emitDeferredTargetDecls();
2791 // Emit code for any potentially referenced deferred decls. Since a
2792 // previously unused static decl may become used during the generation of code
2793 // for a static function, iterate until no changes are made.
2795 if (!DeferredVTables.empty()) {
2796 EmitDeferredVTables();
2798 // Emitting a vtable doesn't directly cause more vtables to
2799 // become deferred, although it can cause functions to be
2800 // emitted that then need those vtables.
2801 assert(DeferredVTables.empty());
2804 // Emit CUDA/HIP static device variables referenced by host code only.
2805 // Note we should not clear CUDADeviceVarODRUsedByHost since it is still
2806 // needed for further handling.
2807 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
2808 llvm::append_range(DeferredDeclsToEmit,
2809 getContext().CUDADeviceVarODRUsedByHost);
2811 // Stop if we're out of both deferred vtables and deferred declarations.
2812 if (DeferredDeclsToEmit.empty())
2813 return;
2815 // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
2816 // work, it will not interfere with this.
2817 std::vector<GlobalDecl> CurDeclsToEmit;
2818 CurDeclsToEmit.swap(DeferredDeclsToEmit);
2820 for (GlobalDecl &D : CurDeclsToEmit) {
2821 // We should call GetAddrOfGlobal with IsForDefinition set to true in order
2822 // to get a GlobalValue with exactly the type we need, not something that
2823 // might have been created for another decl with the same mangled name but
2824 // a different type.
2825 llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
2826 GetAddrOfGlobal(D, ForDefinition));
2828 // In case of different address spaces, we may still get a cast, even with
2829 // IsForDefinition equal to true. Query the mangled names table to get the
2830 // GlobalValue.
2831 if (!GV)
2832 GV = GetGlobalValue(getMangledName(D));
2834 // Make sure GetGlobalValue returned non-null.
2835 assert(GV);
2837 // Check to see if we've already emitted this. This is necessary
2838 // for a couple of reasons: first, decls can end up in the
2839 // deferred-decls queue multiple times, and second, decls can end
2840 // up with definitions in unusual ways (e.g. by an extern inline
2841 // function acquiring a strong function redefinition). Just
2842 // ignore these cases.
2843 if (!GV->isDeclaration())
2844 continue;
2846 // If this is OpenMP, check if it is legal to emit this global normally.
2847 if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(D))
2848 continue;
2850 // Otherwise, emit the definition and move on to the next one.
2851 EmitGlobalDefinition(D, GV);
2853 // If we found out that we need to emit more decls, do that recursively.
2854 // This has the advantage that the decls are emitted in a DFS and related
2855 // ones are close together, which is convenient for testing.
2856 if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
2857 EmitDeferred();
2858 assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
2863 void CodeGenModule::EmitVTablesOpportunistically() {
2864 // Try to emit external vtables as available_externally if all of their
2865 // inline virtual functions have been emitted. This runs after EmitDeferred()
2866 // and therefore must not create new references to things that need to be
2867 // emitted lazily. Note that it also relies on RTTI being emitted eagerly.
2869 assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
2870 && "Only emit opportunistic vtables with optimizations");
2872 for (const CXXRecordDecl *RD : OpportunisticVTables) {
2873 assert(getVTables().isVTableExternal(RD) &&
2874 "This queue should only contain external vtables");
2875 if (getCXXABI().canSpeculativelyEmitVTable(RD))
2876 VTables.GenerateClassData(RD);
2878 OpportunisticVTables.clear();
2881 void CodeGenModule::EmitGlobalAnnotations() {
2882 if (Annotations.empty())
2883 return;
2885 // Create a new global variable for the ConstantStruct in the Module.
2886 llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
2887 Annotations[0]->getType(), Annotations.size()), Annotations);
2888 auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
2889 llvm::GlobalValue::AppendingLinkage,
2890 Array, "llvm.global.annotations");
2891 gv->setSection(AnnotationSection);
2894 llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
2895 llvm::Constant *&AStr = AnnotationStrings[Str];
2896 if (AStr)
2897 return AStr;
2899 // Not found yet, create a new global.
2900 llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
2901 auto *gv = new llvm::GlobalVariable(
2902 getModule(), s->getType(), true, llvm::GlobalValue::PrivateLinkage, s,
2903 ".str", nullptr, llvm::GlobalValue::NotThreadLocal,
2904 ConstGlobalsPtrTy->getAddressSpace());
2905 gv->setSection(AnnotationSection);
2906 gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2907 AStr = gv;
2908 return gv;
2911 llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
2912 SourceManager &SM = getContext().getSourceManager();
2913 PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2914 if (PLoc.isValid())
2915 return EmitAnnotationString(PLoc.getFilename());
2916 return EmitAnnotationString(SM.getBufferName(Loc));
2919 llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
2920 SourceManager &SM = getContext().getSourceManager();
2921 PresumedLoc PLoc = SM.getPresumedLoc(L);
2922 unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
2923 SM.getExpansionLineNumber(L);
2924 return llvm::ConstantInt::get(Int32Ty, LineNo);
2927 llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
2928 ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()};
2929 if (Exprs.empty())
2930 return llvm::ConstantPointerNull::get(ConstGlobalsPtrTy);
2932 llvm::FoldingSetNodeID ID;
2933 for (Expr *E : Exprs) {
2934 ID.Add(cast<clang::ConstantExpr>(E)->getAPValueResult());
2936 llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()];
2937 if (Lookup)
2938 return Lookup;
2940 llvm::SmallVector<llvm::Constant *, 4> LLVMArgs;
2941 LLVMArgs.reserve(Exprs.size());
2942 ConstantEmitter ConstEmiter(*this);
2943 llvm::transform(Exprs, std::back_inserter(LLVMArgs), [&](const Expr *E) {
2944 const auto *CE = cast<clang::ConstantExpr>(E);
2945 return ConstEmiter.emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(),
2946 CE->getType());
2948 auto *Struct = llvm::ConstantStruct::getAnon(LLVMArgs);
2949 auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true,
2950 llvm::GlobalValue::PrivateLinkage, Struct,
2951 ".args");
2952 GV->setSection(AnnotationSection);
2953 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2954 auto *Bitcasted = llvm::ConstantExpr::getBitCast(GV, GlobalsInt8PtrTy);
2956 Lookup = Bitcasted;
2957 return Bitcasted;
2960 llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
2961 const AnnotateAttr *AA,
2962 SourceLocation L) {
2963 // Get the globals for file name, annotation, and the line number.
2964 llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
2965 *UnitGV = EmitAnnotationUnit(L),
2966 *LineNoCst = EmitAnnotationLineNo(L),
2967 *Args = EmitAnnotationArgs(AA);
2969 llvm::Constant *GVInGlobalsAS = GV;
2970 if (GV->getAddressSpace() !=
2971 getDataLayout().getDefaultGlobalsAddressSpace()) {
2972 GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast(
2973 GV, GV->getValueType()->getPointerTo(
2974 getDataLayout().getDefaultGlobalsAddressSpace()));
2977 // Create the ConstantStruct for the global annotation.
2978 llvm::Constant *Fields[] = {
2979 llvm::ConstantExpr::getBitCast(GVInGlobalsAS, GlobalsInt8PtrTy),
2980 llvm::ConstantExpr::getBitCast(AnnoGV, ConstGlobalsPtrTy),
2981 llvm::ConstantExpr::getBitCast(UnitGV, ConstGlobalsPtrTy),
2982 LineNoCst,
2983 Args,
2985 return llvm::ConstantStruct::getAnon(Fields);
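// Illustrative sketch (assumed IR shape): for
//   __attribute__((annotate("myann"))) int g;
// the entry built here is an anonymous struct of the five fields above,
// roughly { ptr @g, ptr @.str, ptr @.str.1, i32 <line>, ptr null }, and all
// such entries are collected into @llvm.global.annotations by
// EmitGlobalAnnotations() above.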
2988 void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
2989 llvm::GlobalValue *GV) {
2990 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2991 // Get the struct elements for these annotations.
2992 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2993 Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
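// Illustrative sketch (exact IR spelling is approximate, not taken from this
// file): given
//   int g __attribute__((annotate("my_note", 42)));
// the routines above contribute one entry that ultimately lands in the
// appending array
//   @llvm.global.annotations = appending global [1 x { ptr, ptr, ptr, i32, ptr }] [...]
// whose five fields are the annotated global, the ".str" annotation string,
// the translation-unit (file name) string, the line number, and the ".args"
// constant holding the extra arguments; the string and argument globals are
// placed in the "llvm.metadata" section.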
2996 bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
2997 SourceLocation Loc) const {
2998 const auto &NoSanitizeL = getContext().getNoSanitizeList();
2999 // NoSanitize by function name.
3000 if (NoSanitizeL.containsFunction(Kind, Fn->getName()))
3001 return true;
3002 // NoSanitize by location. Check "mainfile" prefix.
3003 auto &SM = Context.getSourceManager();
3004 const FileEntry &MainFile = *SM.getFileEntryForID(SM.getMainFileID());
3005 if (NoSanitizeL.containsMainFile(Kind, MainFile.getName()))
3006 return true;
3008 // Check "src" prefix.
3009 if (Loc.isValid())
3010 return NoSanitizeL.containsLocation(Kind, Loc);
3011 // If location is unknown, this may be a compiler-generated function. Assume
3012 // it's located in the main file.
3013 return NoSanitizeL.containsFile(Kind, MainFile.getName());
3016 bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind,
3017 llvm::GlobalVariable *GV,
3018 SourceLocation Loc, QualType Ty,
3019 StringRef Category) const {
3020 const auto &NoSanitizeL = getContext().getNoSanitizeList();
3021 if (NoSanitizeL.containsGlobal(Kind, GV->getName(), Category))
3022 return true;
3023 auto &SM = Context.getSourceManager();
3024 if (NoSanitizeL.containsMainFile(
3025 Kind, SM.getFileEntryForID(SM.getMainFileID())->getName(), Category))
3026 return true;
3027 if (NoSanitizeL.containsLocation(Kind, Loc, Category))
3028 return true;
3030 // Check global type.
3031 if (!Ty.isNull()) {
3032 // Drill down through array types: if a global variable of a certain type is
3033 // not sanitized, we also don't instrument arrays of that type.
3034 while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
3035 Ty = AT->getElementType();
3036 Ty = Ty.getCanonicalType().getUnqualifiedType();
3037 // Only record types (classes, structs etc.) are ignored.
3038 if (Ty->isRecordType()) {
3039 std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
3040 if (NoSanitizeL.containsType(Kind, TypeStr, Category))
3041 return true;
3044 return false;
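// Illustrative sketch of the ignorelist entries the two overloads above can
// match (file passed via -fsanitize-ignorelist=...; spelling per the
// SpecialCaseList format, shown here as an assumed example):
//   [address]
//   fun:do_not_instrument_this_function
//   src:third_party/*
//   mainfile:main.cpp
//   global:known_noisy_global
//   type:LegacyPackedStruct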
3047 bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
3048 StringRef Category) const {
3049 const auto &XRayFilter = getContext().getXRayFilter();
3050 using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
3051 auto Attr = ImbueAttr::NONE;
3052 if (Loc.isValid())
3053 Attr = XRayFilter.shouldImbueLocation(Loc, Category);
3054 if (Attr == ImbueAttr::NONE)
3055 Attr = XRayFilter.shouldImbueFunction(Fn->getName());
3056 switch (Attr) {
3057 case ImbueAttr::NONE:
3058 return false;
3059 case ImbueAttr::ALWAYS:
3060 Fn->addFnAttr("function-instrument", "xray-always");
3061 break;
3062 case ImbueAttr::ALWAYS_ARG1:
3063 Fn->addFnAttr("function-instrument", "xray-always");
3064 Fn->addFnAttr("xray-log-args", "1");
3065 break;
3066 case ImbueAttr::NEVER:
3067 Fn->addFnAttr("function-instrument", "xray-never");
3068 break;
3070 return true;
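// Illustrative sketch (the attr-list file format shown is an assumption,
// roughly what -fxray-attr-list accepts):
//   [always]
//   fun:hot_path
//   [never]
//   fun:init_once
// With such a filter, the code above tags hot_path with
// "function-instrument"="xray-always" and init_once with
// "function-instrument"="xray-never"; the ALWAYS_ARG1 case additionally adds
// "xray-log-args"="1" so the first argument is logged.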
3073 ProfileList::ExclusionType
3074 CodeGenModule::isFunctionBlockedByProfileList(llvm::Function *Fn,
3075 SourceLocation Loc) const {
3076 const auto &ProfileList = getContext().getProfileList();
3077 // If the profile list is empty, then instrument everything.
3078 if (ProfileList.isEmpty())
3079 return ProfileList::Allow;
3080 CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr();
3081 // First, check the function name.
3082 if (auto V = ProfileList.isFunctionExcluded(Fn->getName(), Kind))
3083 return *V;
3084 // Next, check the source location.
3085 if (Loc.isValid())
3086 if (auto V = ProfileList.isLocationExcluded(Loc, Kind))
3087 return *V;
3088 // If location is unknown, this may be a compiler-generated function. Assume
3089 // it's located in the main file.
3090 auto &SM = Context.getSourceManager();
3091 if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID()))
3092 if (auto V = ProfileList.isFileExcluded(MainFile->getName(), Kind))
3093 return *V;
3094 return ProfileList.getDefault(Kind);
3097 ProfileList::ExclusionType
3098 CodeGenModule::isFunctionBlockedFromProfileInstr(llvm::Function *Fn,
3099 SourceLocation Loc) const {
3100 auto V = isFunctionBlockedByProfileList(Fn, Loc);
3101 if (V != ProfileList::Allow)
3102 return V;
3104 auto NumGroups = getCodeGenOpts().ProfileTotalFunctionGroups;
3105 if (NumGroups > 1) {
3106 auto Group = llvm::crc32(arrayRefFromStringRef(Fn->getName())) % NumGroups;
3107 if (Group != getCodeGenOpts().ProfileSelectedFunctionGroup)
3108 return ProfileList::Skip;
3110 return ProfileList::Allow;
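// Illustrative sketch (the driver flag names are assumptions about the usual
// Clang spellings): building with -fprofile-function-groups=4 and
// -fprofile-selected-function-group=1 splits functions into four buckets by
// the crc32 of the mangled name, and only functions whose bucket equals 1
// keep instrumentation; every other function returns ProfileList::Skip above.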
3113 bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
3114 // Never defer when EmitAllDecls is specified.
3115 if (LangOpts.EmitAllDecls)
3116 return true;
3118 if (CodeGenOpts.KeepStaticConsts) {
3119 const auto *VD = dyn_cast<VarDecl>(Global);
3120 if (VD && VD->getType().isConstQualified() &&
3121 VD->getStorageDuration() == SD_Static)
3122 return true;
3125 return getContext().DeclMustBeEmitted(Global);
3128 bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
3129 // In OpenMP 5.0 variables and functions may be marked as
3130 // device_type(host/nohost), and we should not emit them eagerly unless we are
3131 // sure that they must be emitted on the host/device. To be sure we need to
3132 // have seen a declare target with an explicit mention of the function; we know
3133 // we have if the level of the declare target attribute is -1. Note that we
3134 // check elsewhere whether we should emit this at all.
3135 if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) {
3136 std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
3137 OMPDeclareTargetDeclAttr::getActiveAttr(Global);
3138 if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1)
3139 return false;
3142 if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
3143 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
3144 // Implicit template instantiations may change linkage if they are later
3145 // explicitly instantiated, so they should not be emitted eagerly.
3146 return false;
3148 if (const auto *VD = dyn_cast<VarDecl>(Global)) {
3149 if (Context.getInlineVariableDefinitionKind(VD) ==
3150 ASTContext::InlineVariableDefinitionKind::WeakUnknown)
3151 // A definition of an inline constexpr static data member may change
3152 // linkage later if it's redeclared outside the class.
3153 return false;
3154 if (CXX20ModuleInits && VD->getOwningModule() &&
3155 !VD->getOwningModule()->isModuleMapModule()) {
3156 // For CXX20, module-owned initializers need to be deferred, since it is
3157 // not known at this point if they will be run for the current module or
3158 // as part of the initializer for an imported one.
3159 return false;
3162 // If OpenMP is enabled and threadprivates must be generated like TLS, delay
3163 // codegen for global variables, because they may be marked as threadprivate.
3164 if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
3165 getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
3166 !isTypeConstant(Global->getType(), false, false) &&
3167 !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
3168 return false;
3170 return true;
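// Illustrative sketch of the TSK_ImplicitInstantiation case above:
//   template <typename T> T twice(T x) { return x + x; }
//   int use() { return twice(2); }   // twice<int>: implicit instantiation
// twice<int> is not emitted eagerly because a later explicit instantiation
// definition ("template int twice<int>(int);") would change its linkage.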
3173 ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
3174 StringRef Name = getMangledName(GD);
3176 // The UUID descriptor should be pointer aligned.
3177 CharUnits Alignment = CharUnits::fromQuantity(PointerAlignInBytes);
3179 // Look for an existing global.
3180 if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
3181 return ConstantAddress(GV, GV->getValueType(), Alignment);
3183 ConstantEmitter Emitter(*this);
3184 llvm::Constant *Init;
3186 APValue &V = GD->getAsAPValue();
3187 if (!V.isAbsent()) {
3188 // If possible, emit the APValue version of the initializer. In particular,
3189 // this gets the type of the constant right.
3190 Init = Emitter.emitForInitializer(
3191 GD->getAsAPValue(), GD->getType().getAddressSpace(), GD->getType());
3192 } else {
3193 // As a fallback, directly construct the constant.
3194 // FIXME: This may get padding wrong under esoteric struct layout rules.
3195 // MSVC appears to create a complete type 'struct __s_GUID' that it
3196 // presumably uses to represent these constants.
3197 MSGuidDecl::Parts Parts = GD->getParts();
3198 llvm::Constant *Fields[4] = {
3199 llvm::ConstantInt::get(Int32Ty, Parts.Part1),
3200 llvm::ConstantInt::get(Int16Ty, Parts.Part2),
3201 llvm::ConstantInt::get(Int16Ty, Parts.Part3),
3202 llvm::ConstantDataArray::getRaw(
3203 StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), 8,
3204 Int8Ty)};
3205 Init = llvm::ConstantStruct::getAnon(Fields);
3208 auto *GV = new llvm::GlobalVariable(
3209 getModule(), Init->getType(),
3210 /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
3211 if (supportsCOMDAT())
3212 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
3213 setDSOLocal(GV);
3215 if (!V.isAbsent()) {
3216 Emitter.finalize(GV);
3217 return ConstantAddress(GV, GV->getValueType(), Alignment);
3220 llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
3221 llvm::Constant *Addr = llvm::ConstantExpr::getBitCast(
3222 GV, Ty->getPointerTo(GV->getAddressSpace()));
3223 return ConstantAddress(Addr, Ty, Alignment);
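// Illustrative sketch (MSVC extensions; the GUID value is arbitrary):
//   struct __declspec(uuid("01234567-89ab-cdef-0123-456789abcdef")) S;
//   const GUID &g = __uuidof(S);
// __uuidof(S) refers to an MSGuidDecl; the code above emits it as a
// linkonce_odr constant whose fields are the i32/i16/i16/[8 x i8] parts of
// the GUID, deduplicated across TUs via COMDAT when supported.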
3226 ConstantAddress CodeGenModule::GetAddrOfUnnamedGlobalConstantDecl(
3227 const UnnamedGlobalConstantDecl *GCD) {
3228 CharUnits Alignment = getContext().getTypeAlignInChars(GCD->getType());
3230 llvm::GlobalVariable **Entry = nullptr;
3231 Entry = &UnnamedGlobalConstantDeclMap[GCD];
3232 if (*Entry)
3233 return ConstantAddress(*Entry, (*Entry)->getValueType(), Alignment);
3235 ConstantEmitter Emitter(*this);
3236 llvm::Constant *Init;
3238 const APValue &V = GCD->getValue();
3240 assert(!V.isAbsent());
3241 Init = Emitter.emitForInitializer(V, GCD->getType().getAddressSpace(),
3242 GCD->getType());
3244 auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
3245 /*isConstant=*/true,
3246 llvm::GlobalValue::PrivateLinkage, Init,
3247 ".constant");
3248 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3249 GV->setAlignment(Alignment.getAsAlign());
3251 Emitter.finalize(GV);
3253 *Entry = GV;
3254 return ConstantAddress(GV, GV->getValueType(), Alignment);
3257 ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
3258 const TemplateParamObjectDecl *TPO) {
3259 StringRef Name = getMangledName(TPO);
3260 CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());
3262 if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
3263 return ConstantAddress(GV, GV->getValueType(), Alignment);
3265 ConstantEmitter Emitter(*this);
3266 llvm::Constant *Init = Emitter.emitForInitializer(
3267 TPO->getValue(), TPO->getType().getAddressSpace(), TPO->getType());
3269 if (!Init) {
3270 ErrorUnsupported(TPO, "template parameter object");
3271 return ConstantAddress::invalid();
3274 llvm::GlobalValue::LinkageTypes Linkage =
3275 isExternallyVisible(TPO->getLinkageAndVisibility().getLinkage())
3276 ? llvm::GlobalValue::LinkOnceODRLinkage
3277 : llvm::GlobalValue::InternalLinkage;
3278 auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
3279 /*isConstant=*/true, Linkage, Init, Name);
3280 setGVProperties(GV, TPO);
3281 if (supportsCOMDAT())
3282 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
3283 Emitter.finalize(GV);
3285 return ConstantAddress(GV, GV->getValueType(), Alignment);
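// Illustrative sketch (C++20 class-type non-type template parameters):
//   struct Fixed { int n; };
//   template <Fixed F> int get() { return F.n; }
//   int v = get<Fixed{42}>();
// The object Fixed{42} is a TemplateParamObjectDecl; the code above emits it
// as a constant global with linkonce_odr linkage (or internal linkage if it
// is not externally visible), shared via COMDAT when supported.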
3288 ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
3289 const AliasAttr *AA = VD->getAttr<AliasAttr>();
3290 assert(AA && "No alias?");
3292 CharUnits Alignment = getContext().getDeclAlign(VD);
3293 llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
3295 // See if there is already something with the target's name in the module.
3296 llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
3297 if (Entry) {
3298 unsigned AS = getTypes().getTargetAddressSpace(VD->getType());
3299 auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
3300 return ConstantAddress(Ptr, DeclTy, Alignment);
3303 llvm::Constant *Aliasee;
3304 if (isa<llvm::FunctionType>(DeclTy))
3305 Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
3306 GlobalDecl(cast<FunctionDecl>(VD)),
3307 /*ForVTable=*/false);
3308 else
3309 Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
3310 nullptr);
3312 auto *F = cast<llvm::GlobalValue>(Aliasee);
3313 F->setLinkage(llvm::Function::ExternalWeakLinkage);
3314 WeakRefReferences.insert(F);
3316 return ConstantAddress(Aliasee, DeclTy, Alignment);
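// Illustrative sketch of the weakref pattern handled above:
//   static int backend(void) __attribute__((weakref("real_backend")));
// References to backend() resolve to the aliasee "real_backend", which is
// created (or re-marked) here with extern_weak linkage so the program still
// links when real_backend is absent.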
3319 void CodeGenModule::EmitGlobal(GlobalDecl GD) {
3320 const auto *Global = cast<ValueDecl>(GD.getDecl());
3322 // Weak references don't produce any output by themselves.
3323 if (Global->hasAttr<WeakRefAttr>())
3324 return;
3326 // If this is an alias definition (which otherwise looks like a declaration)
3327 // emit it now.
3328 if (Global->hasAttr<AliasAttr>())
3329 return EmitAliasDefinition(GD);
3331 // An ifunc is like an alias whose value is resolved at runtime by calling a resolver.
3332 if (Global->hasAttr<IFuncAttr>())
3333 return emitIFuncDefinition(GD);
3335 // If this is a cpu_dispatch multiversion function, emit the resolver.
3336 if (Global->hasAttr<CPUDispatchAttr>())
3337 return emitCPUDispatchDefinition(GD);
3339 // If this is CUDA, be selective about which declarations we emit.
3340 if (LangOpts.CUDA) {
3341 if (LangOpts.CUDAIsDevice) {
3342 if (!Global->hasAttr<CUDADeviceAttr>() &&
3343 !Global->hasAttr<CUDAGlobalAttr>() &&
3344 !Global->hasAttr<CUDAConstantAttr>() &&
3345 !Global->hasAttr<CUDASharedAttr>() &&
3346 !Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
3347 !Global->getType()->isCUDADeviceBuiltinTextureType())
3348 return;
3349 } else {
3350 // We need to emit host-side 'shadows' for all global
3351 // device-side variables because the CUDA runtime needs their
3352 // size and host-side address in order to provide access to
3353 // their device-side incarnations.
3355 // So device-only functions are the only things we skip.
3356 if (isa<FunctionDecl>(Global) && !Global->hasAttr<CUDAHostAttr>() &&
3357 Global->hasAttr<CUDADeviceAttr>())
3358 return;
3360 assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&
3361 "Expected Variable or Function");
3365 if (LangOpts.OpenMP) {
3366 // If this is OpenMP, check if it is legal to emit this global normally.
3367 if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
3368 return;
3369 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Global)) {
3370 if (MustBeEmitted(Global))
3371 EmitOMPDeclareReduction(DRD);
3372 return;
3374 if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
3375 if (MustBeEmitted(Global))
3376 EmitOMPDeclareMapper(DMD);
3377 return;
3381 // Ignore declarations, they will be emitted on their first use.
3382 if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
3383 // Forward declarations are emitted lazily on first use.
3384 if (!FD->doesThisDeclarationHaveABody()) {
3385 if (!FD->doesDeclarationForceExternallyVisibleDefinition())
3386 return;
3388 StringRef MangledName = getMangledName(GD);
3390 // Compute the function info and LLVM type.
3391 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
3392 llvm::Type *Ty = getTypes().GetFunctionType(FI);
3394 GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
3395 /*DontDefer=*/false);
3396 return;
3398 } else {
3399 const auto *VD = cast<VarDecl>(Global);
3400 assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
3401 if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
3402 !Context.isMSStaticDataMemberInlineDefinition(VD)) {
3403 if (LangOpts.OpenMP) {
3404 // Emit declaration of the must-be-emitted declare target variable.
3405 if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3406 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
3407 bool UnifiedMemoryEnabled =
3408 getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
3409 if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3410 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3411 !UnifiedMemoryEnabled) {
3412 (void)GetAddrOfGlobalVar(VD);
3413 } else {
3414 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
3415 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3416 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3417 UnifiedMemoryEnabled)) &&
3418 "Link clause or to clause with unified memory expected.");
3419 (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
3422 return;
3425 // If this declaration may have caused an inline variable definition to
3426 // change linkage, make sure that it's emitted.
3427 if (Context.getInlineVariableDefinitionKind(VD) ==
3428 ASTContext::InlineVariableDefinitionKind::Strong)
3429 GetAddrOfGlobalVar(VD);
3430 return;
3434 // Defer code generation to first use when possible, e.g. if this is an inline
3435 // function. If the global must always be emitted, do it eagerly if possible
3436 // to benefit from cache locality.
3437 if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
3438 // Emit the definition if it can't be deferred.
3439 EmitGlobalDefinition(GD);
3440 return;
3443 // If we're deferring emission of a C++ variable with an
3444 // initializer, remember the order in which it appeared in the file.
3445 if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
3446 cast<VarDecl>(Global)->hasInit()) {
3447 DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
3448 CXXGlobalInits.push_back(nullptr);
3451 StringRef MangledName = getMangledName(GD);
3452 if (GetGlobalValue(MangledName) != nullptr) {
3453 // The value has already been used and should therefore be emitted.
3454 addDeferredDeclToEmit(GD);
3455 } else if (MustBeEmitted(Global)) {
3456 // The value must be emitted, but cannot be emitted eagerly.
3457 assert(!MayBeEmittedEagerly(Global));
3458 addDeferredDeclToEmit(GD);
3459 EmittedDeferredDecls[MangledName] = GD;
3460 } else {
3461 // Otherwise, remember that we saw a deferred decl with this name. The
3462 // first use of the mangled name will cause it to move into
3463 // DeferredDeclsToEmit.
3464 DeferredDecls[MangledName] = GD;
3468 // Check if T is a class type with a destructor that's not dllimport.
3469 static bool HasNonDllImportDtor(QualType T) {
3470 if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>())
3471 if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
3472 if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
3473 return true;
3475 return false;
3478 namespace {
3479 struct FunctionIsDirectlyRecursive
3480 : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
3481 const StringRef Name;
3482 const Builtin::Context &BI;
3483 FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
3484 : Name(N), BI(C) {}
3486 bool VisitCallExpr(const CallExpr *E) {
3487 const FunctionDecl *FD = E->getDirectCallee();
3488 if (!FD)
3489 return false;
3490 AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
3491 if (Attr && Name == Attr->getLabel())
3492 return true;
3493 unsigned BuiltinID = FD->getBuiltinID();
3494 if (!BuiltinID || !BI.isLibFunction(BuiltinID))
3495 return false;
3496 StringRef BuiltinName = BI.getName(BuiltinID);
3497 if (BuiltinName.startswith("__builtin_") &&
3498 Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
3499 return true;
3501 return false;
3504 bool VisitStmt(const Stmt *S) {
3505 for (const Stmt *Child : S->children())
3506 if (Child && this->Visit(Child))
3507 return true;
3508 return false;
3512 // Make sure we're not referencing non-imported vars or functions.
3513 struct DLLImportFunctionVisitor
3514 : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
3515 bool SafeToInline = true;
3517 bool shouldVisitImplicitCode() const { return true; }
3519 bool VisitVarDecl(VarDecl *VD) {
3520 if (VD->getTLSKind()) {
3521 // A thread-local variable cannot be imported.
3522 SafeToInline = false;
3523 return SafeToInline;
3526 // A variable definition might imply a destructor call.
3527 if (VD->isThisDeclarationADefinition())
3528 SafeToInline = !HasNonDllImportDtor(VD->getType());
3530 return SafeToInline;
3533 bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
3534 if (const auto *D = E->getTemporary()->getDestructor())
3535 SafeToInline = D->hasAttr<DLLImportAttr>();
3536 return SafeToInline;
3539 bool VisitDeclRefExpr(DeclRefExpr *E) {
3540 ValueDecl *VD = E->getDecl();
3541 if (isa<FunctionDecl>(VD))
3542 SafeToInline = VD->hasAttr<DLLImportAttr>();
3543 else if (VarDecl *V = dyn_cast<VarDecl>(VD))
3544 SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
3545 return SafeToInline;
3548 bool VisitCXXConstructExpr(CXXConstructExpr *E) {
3549 SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>();
3550 return SafeToInline;
3553 bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
3554 CXXMethodDecl *M = E->getMethodDecl();
3555 if (!M) {
3556 // Call through a pointer to member function. This is safe to inline.
3557 SafeToInline = true;
3558 } else {
3559 SafeToInline = M->hasAttr<DLLImportAttr>();
3561 return SafeToInline;
3564 bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
3565 SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
3566 return SafeToInline;
3569 bool VisitCXXNewExpr(CXXNewExpr *E) {
3570 SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
3571 return SafeToInline;
3576 // isTriviallyRecursive - Check whether this function calls another
3577 // decl that, because of an asm label attribute or the other decl being a
3578 // builtin, ends up referring back to this function itself.
3579 bool
3580 CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
3581 StringRef Name;
3582 if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
3583 // asm labels are a special kind of mangling we have to support.
3584 AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
3585 if (!Attr)
3586 return false;
3587 Name = Attr->getLabel();
3588 } else {
3589 Name = FD->getName();
3592 FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
3593 const Stmt *Body = FD->getBody();
3594 return Body ? Walker.Visit(Body) : false;
3597 bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
3598 if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
3599 return true;
3600 const auto *F = cast<FunctionDecl>(GD.getDecl());
3601 if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
3602 return false;
3604 if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) {
3605 // Check whether it would be safe to inline this dllimport function.
3606 DLLImportFunctionVisitor Visitor;
3607 Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
3608 if (!Visitor.SafeToInline)
3609 return false;
3611 if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(F)) {
3612 // Implicit destructor invocations aren't captured in the AST, so the
3613 // check above can't see them. Check for them manually here.
3614 for (const Decl *Member : Dtor->getParent()->decls())
3615 if (isa<FieldDecl>(Member))
3616 if (HasNonDllImportDtor(cast<FieldDecl>(Member)->getType()))
3617 return false;
3618 for (const CXXBaseSpecifier &B : Dtor->getParent()->bases())
3619 if (HasNonDllImportDtor(B.getType()))
3620 return false;
3624 // Inline builtin declarations must be emitted. They are often fortified
3625 // functions.
3626 if (F->isInlineBuiltinDeclaration())
3627 return true;
3629 // PR9614. Avoid cases where the source code is lying to us. An
3630 // available_externally function should have an equivalent function elsewhere,
3631 // but a function that calls itself through asm label/`__builtin_` trickery is
3632 // clearly not equivalent to the real implementation.
3633 // This happens in glibc's btowc and in some configure checks.
3634 return !isTriviallyRecursive(F);
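// Illustrative sketch of the isTriviallyRecursive case (a hypothetical
// glibc-style forwarder, not taken from this file):
//   extern int my_ffs(int) __asm__("ffs");
//   extern __inline __attribute__((gnu_inline)) int my_ffs(int x) {
//     return __builtin_ffs(x);
//   }
// The asm label is "ffs" and __builtin_ffs lowers back to a call to ffs, so
// an available_externally body would simply call itself; shouldEmitFunction
// therefore refuses to emit it.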
3637 bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
3638 return CodeGenOpts.OptimizationLevel > 0;
3641 void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
3642 llvm::GlobalValue *GV) {
3643 const auto *FD = cast<FunctionDecl>(GD.getDecl());
3645 if (FD->isCPUSpecificMultiVersion()) {
3646 auto *Spec = FD->getAttr<CPUSpecificAttr>();
3647 for (unsigned I = 0; I < Spec->cpus_size(); ++I)
3648 EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
3649 } else if (FD->isTargetClonesMultiVersion()) {
3650 auto *Clone = FD->getAttr<TargetClonesAttr>();
3651 for (unsigned I = 0; I < Clone->featuresStrs_size(); ++I)
3652 if (Clone->isFirstOfVersion(I))
3653 EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
3654 // Ensure that the resolver function is also emitted.
3655 GetOrCreateMultiVersionResolver(GD);
3656 } else
3657 EmitGlobalFunctionDefinition(GD, GV);
3660 void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
3661 const auto *D = cast<ValueDecl>(GD.getDecl());
3663 PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
3664 Context.getSourceManager(),
3665 "Generating code for declaration");
3667 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3668 // At -O0, don't generate IR for functions with available_externally
3669 // linkage.
3670 if (!shouldEmitFunction(GD))
3671 return;
3673 llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
3674 std::string Name;
3675 llvm::raw_string_ostream OS(Name);
3676 FD->getNameForDiagnostic(OS, getContext().getPrintingPolicy(),
3677 /*Qualified=*/true);
3678 return Name;
3681 if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
3682 // Make sure to emit the definition(s) before we emit the thunks.
3683 // This is necessary for the generation of certain thunks.
3684 if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
3685 ABI->emitCXXStructor(GD);
3686 else if (FD->isMultiVersion())
3687 EmitMultiVersionFunctionDefinition(GD, GV);
3688 else
3689 EmitGlobalFunctionDefinition(GD, GV);
3691 if (Method->isVirtual())
3692 getVTables().EmitThunks(GD);
3694 return;
3697 if (FD->isMultiVersion())
3698 return EmitMultiVersionFunctionDefinition(GD, GV);
3699 return EmitGlobalFunctionDefinition(GD, GV);
3702 if (const auto *VD = dyn_cast<VarDecl>(D))
3703 return EmitGlobalVarDefinition(VD, !VD->hasDefinition());
3705 llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
3708 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
3709 llvm::Function *NewFn);
3711 static unsigned
3712 TargetMVPriority(const TargetInfo &TI,
3713 const CodeGenFunction::MultiVersionResolverOption &RO) {
3714 unsigned Priority = 0;
3715 unsigned NumFeatures = 0;
3716 for (StringRef Feat : RO.Conditions.Features) {
3717 Priority = std::max(Priority, TI.multiVersionSortPriority(Feat));
3718 NumFeatures++;
3721 if (!RO.Conditions.Architecture.empty())
3722 Priority = std::max(
3723 Priority, TI.multiVersionSortPriority(RO.Conditions.Architecture));
3725 Priority += TI.multiVersionFeatureCost() * NumFeatures;
3727 return Priority;
3730 // Multiversion functions should be at most 'WeakODRLinkage' so that a different
3731 // TU can forward declare the function without causing problems; this matters
3732 // particularly for CPUDispatch. This also makes sure we work with internal
3733 // linkage functions, so that the same function name can be used with internal
3734 // linkage in multiple TUs.
3735 llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
3736 GlobalDecl GD) {
3737 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3738 if (FD->getFormalLinkage() == InternalLinkage)
3739 return llvm::GlobalValue::InternalLinkage;
3740 return llvm::GlobalValue::WeakODRLinkage;
3743 void CodeGenModule::emitMultiVersionFunctions() {
3744 std::vector<GlobalDecl> MVFuncsToEmit;
3745 MultiVersionFuncs.swap(MVFuncsToEmit);
3746 for (GlobalDecl GD : MVFuncsToEmit) {
3747 const auto *FD = cast<FunctionDecl>(GD.getDecl());
3748 assert(FD && "Expected a FunctionDecl");
3750 SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3751 if (FD->isTargetMultiVersion()) {
3752 getContext().forEachMultiversionedFunctionVersion(
3753 FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
3754 GlobalDecl CurGD{
3755 (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
3756 StringRef MangledName = getMangledName(CurGD);
3757 llvm::Constant *Func = GetGlobalValue(MangledName);
3758 if (!Func) {
3759 if (CurFD->isDefined()) {
3760 EmitGlobalFunctionDefinition(CurGD, nullptr);
3761 Func = GetGlobalValue(MangledName);
3762 } else {
3763 const CGFunctionInfo &FI =
3764 getTypes().arrangeGlobalDeclaration(GD);
3765 llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
3766 Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
3767 /*DontDefer=*/false, ForDefinition);
3769 assert(Func && "This should have just been created");
3771 if (CurFD->getMultiVersionKind() == MultiVersionKind::Target) {
3772 const auto *TA = CurFD->getAttr<TargetAttr>();
3773 llvm::SmallVector<StringRef, 8> Feats;
3774 TA->getAddedFeatures(Feats);
3775 Options.emplace_back(cast<llvm::Function>(Func),
3776 TA->getArchitecture(), Feats);
3777 } else {
3778 const auto *TVA = CurFD->getAttr<TargetVersionAttr>();
3779 llvm::SmallVector<StringRef, 8> Feats;
3780 TVA->getFeatures(Feats);
3781 Options.emplace_back(cast<llvm::Function>(Func),
3782 /*Architecture*/ "", Feats);
3785 } else if (FD->isTargetClonesMultiVersion()) {
3786 const auto *TC = FD->getAttr<TargetClonesAttr>();
3787 for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
3788 ++VersionIndex) {
3789 if (!TC->isFirstOfVersion(VersionIndex))
3790 continue;
3791 GlobalDecl CurGD{(FD->isDefined() ? FD->getDefinition() : FD),
3792 VersionIndex};
3793 StringRef Version = TC->getFeatureStr(VersionIndex);
3794 StringRef MangledName = getMangledName(CurGD);
3795 llvm::Constant *Func = GetGlobalValue(MangledName);
3796 if (!Func) {
3797 if (FD->isDefined()) {
3798 EmitGlobalFunctionDefinition(CurGD, nullptr);
3799 Func = GetGlobalValue(MangledName);
3800 } else {
3801 const CGFunctionInfo &FI =
3802 getTypes().arrangeGlobalDeclaration(CurGD);
3803 llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
3804 Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
3805 /*DontDefer=*/false, ForDefinition);
3807 assert(Func && "This should have just been created");
3810 StringRef Architecture;
3811 llvm::SmallVector<StringRef, 1> Feature;
3813 if (getTarget().getTriple().isAArch64()) {
3814 if (Version != "default") {
3815 llvm::SmallVector<StringRef, 8> VerFeats;
3816 Version.split(VerFeats, "+");
3817 for (auto &CurFeat : VerFeats)
3818 Feature.push_back(CurFeat.trim());
3820 } else {
3821 if (Version.startswith("arch="))
3822 Architecture = Version.drop_front(sizeof("arch=") - 1);
3823 else if (Version != "default")
3824 Feature.push_back(Version);
3827 Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
3829 } else {
3830 assert(0 && "Expected a target or target_clones multiversion function");
3831 continue;
3834 llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD);
3835 if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(ResolverConstant))
3836 ResolverConstant = IFunc->getResolver();
3837 llvm::Function *ResolverFunc = cast<llvm::Function>(ResolverConstant);
3839 ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
3841 if (supportsCOMDAT())
3842 ResolverFunc->setComdat(
3843 getModule().getOrInsertComdat(ResolverFunc->getName()));
3845 const TargetInfo &TI = getTarget();
3846 llvm::stable_sort(
3847 Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
3848 const CodeGenFunction::MultiVersionResolverOption &RHS) {
3849 return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
3851 CodeGenFunction CGF(*this);
3852 CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3855 // Ensure that any additions to the deferred decls list caused by emitting a
3856 // variant are emitted. This can happen when the variant itself is inline and
3857 // calls a function without linkage.
3858 if (!MVFuncsToEmit.empty())
3859 EmitDeferred();
3861 // Ensure that any additions to the multiversion funcs list from either the
3862 // deferred decls or the multiversion functions themselves are emitted.
3863 if (!MultiVersionFuncs.empty())
3864 emitMultiVersionFunctions();
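// Illustrative sketch of what emitMultiVersionFunctions produces for
//   __attribute__((target_clones("avx2", "default")))
//   int compute(void) { return 1; }
// One function body is emitted per distinct version, plus a resolver whose
// checks are ordered by the priorities computed in TargetMVPriority; on
// targets with ifunc support the original symbol becomes an ifunc bound to
// that resolver, so the choice happens once at load time.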
3867 void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
3868 const auto *FD = cast<FunctionDecl>(GD.getDecl());
3869 assert(FD && "Not a FunctionDecl?");
3870 assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?");
3871 const auto *DD = FD->getAttr<CPUDispatchAttr>();
3872 assert(DD && "Not a cpu_dispatch Function?");
3874 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
3875 llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
3877 StringRef ResolverName = getMangledName(GD);
3878 UpdateMultiVersionNames(GD, FD, ResolverName);
3880 llvm::Type *ResolverType;
3881 GlobalDecl ResolverGD;
3882 if (getTarget().supportsIFunc()) {
3883 ResolverType = llvm::FunctionType::get(
3884 llvm::PointerType::get(DeclTy,
3885 getTypes().getTargetAddressSpace(FD->getType())),
3886 false);
3888 else {
3889 ResolverType = DeclTy;
3890 ResolverGD = GD;
3893 auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
3894 ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
3895 ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
3896 if (supportsCOMDAT())
3897 ResolverFunc->setComdat(
3898 getModule().getOrInsertComdat(ResolverFunc->getName()));
3900 SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3901 const TargetInfo &Target = getTarget();
3902 unsigned Index = 0;
3903 for (const IdentifierInfo *II : DD->cpus()) {
3904 // Get the name of the target function so we can look it up/create it.
3905 std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
3906 getCPUSpecificMangling(*this, II->getName());
3908 llvm::Constant *Func = GetGlobalValue(MangledName);
3910 if (!Func) {
3911 GlobalDecl ExistingDecl = Manglings.lookup(MangledName);
3912 if (ExistingDecl.getDecl() &&
3913 ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
3914 EmitGlobalFunctionDefinition(ExistingDecl, nullptr);
3915 Func = GetGlobalValue(MangledName);
3916 } else {
3917 if (!ExistingDecl.getDecl())
3918 ExistingDecl = GD.getWithMultiVersionIndex(Index);
3920 Func = GetOrCreateLLVMFunction(
3921 MangledName, DeclTy, ExistingDecl,
3922 /*ForVTable=*/false, /*DontDefer=*/true,
3923 /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
3927 llvm::SmallVector<StringRef, 32> Features;
3928 Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
3929 llvm::transform(Features, Features.begin(),
3930 [](StringRef Str) { return Str.substr(1); });
3931 llvm::erase_if(Features, [&Target](StringRef Feat) {
3932 return !Target.validateCpuSupports(Feat);
3934 Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
3935 ++Index;
3938 llvm::stable_sort(
3939 Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
3940 const CodeGenFunction::MultiVersionResolverOption &RHS) {
3941 return llvm::X86::getCpuSupportsMask(LHS.Conditions.Features) >
3942 llvm::X86::getCpuSupportsMask(RHS.Conditions.Features);
3945 // If the list contains multiple 'default' versions, such as when it contains
3946 // 'pentium' and 'generic', don't emit the call to the generic one (since we
3947 // always run on at least a 'pentium'). We do this by deleting the 'least
3948 // advanced' (read, lowest mangling letter).
3949 while (Options.size() > 1 &&
3950 llvm::X86::getCpuSupportsMask(
3951 (Options.end() - 2)->Conditions.Features) == 0) {
3952 StringRef LHSName = (Options.end() - 2)->Function->getName();
3953 StringRef RHSName = (Options.end() - 1)->Function->getName();
3954 if (LHSName.compare(RHSName) < 0)
3955 Options.erase(Options.end() - 2);
3956 else
3957 Options.erase(Options.end() - 1);
3960 CodeGenFunction CGF(*this);
3961 CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3963 if (getTarget().supportsIFunc()) {
3964 llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(*this, GD);
3965 auto *IFunc = cast<llvm::GlobalValue>(GetOrCreateMultiVersionResolver(GD));
3967 // Fix up function declarations that were created for cpu_specific before
3968 // cpu_dispatch was known
3969 if (!isa<llvm::GlobalIFunc>(IFunc)) {
3970 assert(cast<llvm::Function>(IFunc)->isDeclaration());
3971 auto *GI = llvm::GlobalIFunc::create(DeclTy, 0, Linkage, "", ResolverFunc,
3972 &getModule());
3973 GI->takeName(IFunc);
3974 IFunc->replaceAllUsesWith(GI);
3975 IFunc->eraseFromParent();
3976 IFunc = GI;
3979 std::string AliasName = getMangledNameImpl(
3980 *this, GD, FD, /*OmitMultiVersionMangling=*/true);
3981 llvm::Constant *AliasFunc = GetGlobalValue(AliasName);
3982 if (!AliasFunc) {
3983 auto *GA = llvm::GlobalAlias::create(DeclTy, 0, Linkage, AliasName, IFunc,
3984 &getModule());
3985 SetCommonAttributes(GD, GA);
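// Illustrative sketch of the cpu_specific/cpu_dispatch pattern handled above
// (the CPU names are examples only):
//   __attribute__((cpu_specific(ivybridge))) void work(void) { /* AVX path */ }
//   __attribute__((cpu_specific(generic)))   void work(void) { /* fallback */ }
//   __attribute__((cpu_dispatch(ivybridge, generic))) void work(void);
// The dispatcher selects among the per-CPU bodies through the resolver built
// here, with candidates ordered by llvm::X86::getCpuSupportsMask, and is
// exposed through an ifunc plus an alias with the unmangled name when ifuncs
// are supported.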
3990 /// If a dispatcher for the specified mangled name is not in the module, create
3991 /// and return an llvm Function with the specified type.
3992 llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
3993 const auto *FD = cast<FunctionDecl>(GD.getDecl());
3994 assert(FD && "Not a FunctionDecl?");
3996 std::string MangledName =
3997 getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
3999 // Holds the name of the resolver; in ifunc mode this is the ifunc (which has
4000 // a separate resolver).
4001 std::string ResolverName = MangledName;
4002 if (getTarget().supportsIFunc())
4003 ResolverName += ".ifunc";
4004 else if (FD->isTargetMultiVersion())
4005 ResolverName += ".resolver";
4007 // If the resolver has already been created, just return it.
4008 if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
4009 return ResolverGV;
4011 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
4012 llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
4014 // The resolver needs to be created. For target and target_clones, defer
4015 // creation until the end of the TU.
4016 if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion())
4017 MultiVersionFuncs.push_back(GD);
4019 // For cpu_specific, don't create an ifunc yet because we don't know if the
4020 // cpu_dispatch will be emitted in this translation unit.
4021 if (getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion()) {
4022 llvm::Type *ResolverType = llvm::FunctionType::get(
4023 llvm::PointerType::get(DeclTy,
4024 getTypes().getTargetAddressSpace(FD->getType())),
4025 false);
4026 llvm::Constant *Resolver = GetOrCreateLLVMFunction(
4027 MangledName + ".resolver", ResolverType, GlobalDecl{},
4028 /*ForVTable=*/false);
4029 llvm::GlobalIFunc *GIF =
4030 llvm::GlobalIFunc::create(DeclTy, 0, getMultiversionLinkage(*this, GD),
4031 "", Resolver, &getModule());
4032 GIF->setName(ResolverName);
4033 SetCommonAttributes(FD, GIF);
4035 return GIF;
4038 llvm::Constant *Resolver = GetOrCreateLLVMFunction(
4039 ResolverName, DeclTy, GlobalDecl{}, /*ForVTable=*/false);
4040 assert(isa<llvm::GlobalValue>(Resolver) &&
4041 "Resolver should be created for the first time");
4042 SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
4043 return Resolver;
4046 /// GetOrCreateLLVMFunction - If the specified mangled name is not in the
4047 /// module, create and return an llvm Function with the specified type. If there
4048 /// is something in the module with the specified name, return it potentially
4049 /// bitcasted to the right type.
4051 /// If D is non-null, it specifies a decl that corresponds to this. This is used
4052 /// to set the attributes on the function when it is first created.
4053 llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
4054 StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable,
4055 bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
4056 ForDefinition_t IsForDefinition) {
4057 const Decl *D = GD.getDecl();
4059 // Any attempt to use a MultiVersion function should result in retrieving
4060 // the ifunc instead. Name mangling will handle the rest of the changes.
4061 if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
4062 // For the device mark the function as one that should be emitted.
4063 if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
4064 !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
4065 !DontDefer && !IsForDefinition) {
4066 if (const FunctionDecl *FDDef = FD->getDefinition()) {
4067 GlobalDecl GDDef;
4068 if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
4069 GDDef = GlobalDecl(CD, GD.getCtorType());
4070 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
4071 GDDef = GlobalDecl(DD, GD.getDtorType());
4072 else
4073 GDDef = GlobalDecl(FDDef);
4074 EmitGlobal(GDDef);
4078 if (FD->isMultiVersion()) {
4079 UpdateMultiVersionNames(GD, FD, MangledName);
4080 if (!IsForDefinition)
4081 return GetOrCreateMultiVersionResolver(GD);
4085 // Lookup the entry, lazily creating it if necessary.
4086 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
4087 if (Entry) {
4088 if (WeakRefReferences.erase(Entry)) {
4089 const FunctionDecl *FD = cast_or_null<FunctionDecl>(D);
4090 if (FD && !FD->hasAttr<WeakAttr>())
4091 Entry->setLinkage(llvm::Function::ExternalLinkage);
4094 // Handle dropped DLL attributes.
4095 if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
4096 !shouldMapVisibilityToDLLExport(cast_or_null<NamedDecl>(D))) {
4097 Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
4098 setDSOLocal(Entry);
4101 // If there are two attempts to define the same mangled name, issue an
4102 // error.
4103 if (IsForDefinition && !Entry->isDeclaration()) {
4104 GlobalDecl OtherGD;
4105 // Checking that GD is not yet in DiagnosedConflictingDefinitions is required
4106 // to make sure that we issue an error only once.
4107 if (lookupRepresentativeDecl(MangledName, OtherGD) &&
4108 (GD.getCanonicalDecl().getDecl() !=
4109 OtherGD.getCanonicalDecl().getDecl()) &&
4110 DiagnosedConflictingDefinitions.insert(GD).second) {
4111 getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
4112 << MangledName;
4113 getDiags().Report(OtherGD.getDecl()->getLocation(),
4114 diag::note_previous_definition);
4118 if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
4119 (Entry->getValueType() == Ty)) {
4120 return Entry;
4123 // Make sure the result is of the correct type.
4124 // (If function is requested for a definition, we always need to create a new
4125 // function, not just return a bitcast.)
4126 if (!IsForDefinition)
4127 return llvm::ConstantExpr::getBitCast(
4128 Entry, Ty->getPointerTo(Entry->getAddressSpace()));
4131 // This function doesn't have a complete type (for example, the return
4132 // type is an incomplete struct). Use a fake type instead, and make
4133 // sure not to try to set attributes.
4134 bool IsIncompleteFunction = false;
4136 llvm::FunctionType *FTy;
4137 if (isa<llvm::FunctionType>(Ty)) {
4138 FTy = cast<llvm::FunctionType>(Ty);
4139 } else {
4140 FTy = llvm::FunctionType::get(VoidTy, false);
4141 IsIncompleteFunction = true;
4144 llvm::Function *F =
4145 llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
4146 Entry ? StringRef() : MangledName, &getModule());
4148 // If we already created a function with the same mangled name (but different
4149 // type) before, take its name and add it to the list of functions to be
4150 // replaced with F at the end of CodeGen.
4152 // This happens if there is a prototype for a function (e.g. "int f()") and
4153 // then a definition of a different type (e.g. "int f(int x)").
4154 if (Entry) {
4155 F->takeName(Entry);
4157 // This might be an implementation of a function without a prototype, in
4158 // which case, try to do special replacement of calls which match the new
4159 // prototype. The key thing here is that we also potentially drop
4160 // arguments from the call site so as to make a direct call, which makes the
4161 // inliner happier and suppresses a number of optimizer warnings (!) about
4162 // dropping arguments.
4163 if (!Entry->use_empty()) {
4164 ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
4165 Entry->removeDeadConstantUsers();
4168 llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
4169 F, Entry->getValueType()->getPointerTo(Entry->getAddressSpace()));
4170 addGlobalValReplacement(Entry, BC);
4173 assert(F->getName() == MangledName && "name was uniqued!");
4174 if (D)
4175 SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
4176 if (ExtraAttrs.hasFnAttrs()) {
4177 llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs());
4178 F->addFnAttrs(B);
4181 if (!DontDefer) {
4182 // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
4183 // each other bottoming out with the base dtor. Therefore we emit non-base
4184 // dtors on usage, even if there is no dtor definition in the TU.
4185 if (isa_and_nonnull<CXXDestructorDecl>(D) &&
4186 getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
4187 GD.getDtorType()))
4188 addDeferredDeclToEmit(GD);
4190 // This is the first use or definition of a mangled name. If there is a
4191 // deferred decl with this name, remember that we need to emit it at the end
4192 // of the file.
4193 auto DDI = DeferredDecls.find(MangledName);
4194 if (DDI != DeferredDecls.end()) {
4195 // Move the potentially referenced deferred decl to the
4196 // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
4197 // don't need it anymore).
4198 addDeferredDeclToEmit(DDI->second);
4199 EmittedDeferredDecls[DDI->first] = DDI->second;
4200 DeferredDecls.erase(DDI);
4202 // Otherwise, there are cases we have to worry about where we're
4203 // using a declaration for which we must emit a definition but where
4204 // we might not find a top-level definition:
4205 // - member functions defined inline in their classes
4206 // - friend functions defined inline in some class
4207 // - special member functions with implicit definitions
4208 // If we ever change our AST traversal to walk into class methods,
4209 // this will be unnecessary.
4211 // We also don't emit a definition for a function if it's going to be an
4212 // entry in a vtable, unless it's already marked as used.
4213 } else if (getLangOpts().CPlusPlus && D) {
4214 // Look for a declaration that's lexically in a record.
4215 for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
4216 FD = FD->getPreviousDecl()) {
4217 if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
4218 if (FD->doesThisDeclarationHaveABody()) {
4219 addDeferredDeclToEmit(GD.getWithDecl(FD));
4220 break;
4227 // Make sure the result is of the requested type.
4228 if (!IsIncompleteFunction) {
4229 assert(F->getFunctionType() == Ty);
4230 return F;
4233 return llvm::ConstantExpr::getBitCast(F,
4234 Ty->getPointerTo(F->getAddressSpace()));
4237 /// GetAddrOfFunction - Return the address of the given function. If Ty is
4238 /// non-null, then this function will use the specified type if it has to
4239 /// create it (this occurs when we see a definition of the function).
4240 llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
4241 llvm::Type *Ty,
4242 bool ForVTable,
4243 bool DontDefer,
4244 ForDefinition_t IsForDefinition) {
4245 assert(!cast<FunctionDecl>(GD.getDecl())->isImmediateFunction() &&
4246 "an immediate function should never be emitted");
4247 // If there was no specific requested type, just convert it now.
4248 if (!Ty) {
4249 const auto *FD = cast<FunctionDecl>(GD.getDecl());
4250 Ty = getTypes().ConvertType(FD->getType());
4253 // Devirtualized destructor calls may come through here instead of via
4254 // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
4255 // of the complete destructor when necessary.
4256 if (const auto *DD = dyn_cast<CXXDestructorDecl>(GD.getDecl())) {
4257 if (getTarget().getCXXABI().isMicrosoft() &&
4258 GD.getDtorType() == Dtor_Complete &&
4259 DD->getParent()->getNumVBases() == 0)
4260 GD = GlobalDecl(DD, Dtor_Base);
4263 StringRef MangledName = getMangledName(GD);
4264 auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
4265 /*IsThunk=*/false, llvm::AttributeList(),
4266 IsForDefinition);
4267 // Return the kernel handle for a HIP kernel stub function.
4268 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
4269 cast<FunctionDecl>(GD.getDecl())->hasAttr<CUDAGlobalAttr>()) {
4270 auto *Handle = getCUDARuntime().getKernelHandle(
4271 cast<llvm::Function>(F->stripPointerCasts()), GD);
4272 if (IsForDefinition)
4273 return F;
4274 return llvm::ConstantExpr::getBitCast(Handle, Ty->getPointerTo());
4276 return F;
4279 llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) {
4280 llvm::GlobalValue *F =
4281 cast<llvm::GlobalValue>(GetAddrOfFunction(Decl)->stripPointerCasts());
4283 return llvm::ConstantExpr::getBitCast(
4284 llvm::NoCFIValue::get(F),
4285 llvm::Type::getInt8PtrTy(VMContext, F->getAddressSpace()));
4288 static const FunctionDecl *
4289 GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
4290 TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
4291 DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
4293 IdentifierInfo &CII = C.Idents.get(Name);
4294 for (const auto *Result : DC->lookup(&CII))
4295 if (const auto *FD = dyn_cast<FunctionDecl>(Result))
4296 return FD;
4298 if (!C.getLangOpts().CPlusPlus)
4299 return nullptr;
4301 // Demangle the premangled name from getTerminateFn()
4302 IdentifierInfo &CXXII =
4303 (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ")
4304 ? C.Idents.get("terminate")
4305 : C.Idents.get(Name);
4307 for (const auto &N : {"__cxxabiv1", "std"}) {
4308 IdentifierInfo &NS = C.Idents.get(N);
4309 for (const auto *Result : DC->lookup(&NS)) {
4310 const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
4311 if (auto *LSD = dyn_cast<LinkageSpecDecl>(Result))
4312 for (const auto *Result : LSD->lookup(&NS))
4313 if ((ND = dyn_cast<NamespaceDecl>(Result)))
4314 break;
4316 if (ND)
4317 for (const auto *Result : ND->lookup(&CXXII))
4318 if (const auto *FD = dyn_cast<FunctionDecl>(Result))
4319 return FD;
4323 return nullptr;
4326 /// CreateRuntimeFunction - Create a new runtime function with the specified
4327 /// type and name.
4328 llvm::FunctionCallee
4329 CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
4330 llvm::AttributeList ExtraAttrs, bool Local,
4331 bool AssumeConvergent) {
4332 if (AssumeConvergent) {
4333 ExtraAttrs =
4334 ExtraAttrs.addFnAttribute(VMContext, llvm::Attribute::Convergent);
4337 llvm::Constant *C =
4338 GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
4339 /*DontDefer=*/false, /*IsThunk=*/false,
4340 ExtraAttrs);
4342 if (auto *F = dyn_cast<llvm::Function>(C)) {
4343 if (F->empty()) {
4344 F->setCallingConv(getRuntimeCC());
4346 // In Windows Itanium environments, try to mark runtime functions
4347 // dllimport. For Mingw and MSVC, don't. We don't really know if the user
4348 // will link their standard library statically or dynamically. Marking
4349 // functions imported when they are not imported can cause linker errors
4350 // and warnings.
4351 if (!Local && getTriple().isWindowsItaniumEnvironment() &&
4352 !getCodeGenOpts().LTOVisibilityPublicStd) {
4353 const FunctionDecl *FD = GetRuntimeFunctionDecl(Context, Name);
4354 if (!FD || FD->hasAttr<DLLImportAttr>()) {
4355 F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
4356 F->setLinkage(llvm::GlobalValue::ExternalLinkage);
4359 setDSOLocal(F);
4363 return {FTy, C};
4366 /// isTypeConstant - Determine whether an object of this type can be emitted
4367 /// as a constant.
4369 /// If ExcludeCtor is true, the duration when the object's constructor runs
4370 /// will not be considered. The caller will need to verify that the object is
4371 /// not written to during its construction. ExcludeDtor works similarly.
4372 bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor,
4373 bool ExcludeDtor) {
4374 if (!Ty.isConstant(Context) && !Ty->isReferenceType())
4375 return false;
4377 if (Context.getLangOpts().CPlusPlus) {
4378 if (const CXXRecordDecl *Record
4379 = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
4380 return ExcludeCtor && !Record->hasMutableFields() &&
4381 (Record->hasTrivialDestructor() || ExcludeDtor);
4384 return true;
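// Illustrative sketch:
//   const int Tuned = 42;               // constant: may be placed in rodata
//   struct Cache { mutable int hits; };
//   const Cache GlobalCache{};          // not constant: mutable member
// For C++ record types the answer also depends on ExcludeCtor/ExcludeDtor,
// since the object is written during construction and may be written during
// destruction.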
4387 /// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
4388 /// create and return an llvm GlobalVariable with the specified type and address
4389 /// space. If there is something in the module with the specified name, return
4390 /// it potentially bitcasted to the right type.
4392 /// If D is non-null, it specifies a decl that corresponds to this. This is used
4393 /// to set the attributes on the global when it is first created.
4395 /// If IsForDefinition is true, it is guaranteed that an actual global with
4396 /// type Ty will be returned, not conversion of a variable with the same
4397 /// mangled name but some other type.
4398 llvm::Constant *
4399 CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
4400 LangAS AddrSpace, const VarDecl *D,
4401 ForDefinition_t IsForDefinition) {
4402 // Lookup the entry, lazily creating it if necessary.
4403 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
4404 unsigned TargetAS = getContext().getTargetAddressSpace(AddrSpace);
4405 if (Entry) {
4406 if (WeakRefReferences.erase(Entry)) {
4407 if (D && !D->hasAttr<WeakAttr>())
4408 Entry->setLinkage(llvm::Function::ExternalLinkage);
4411 // Handle dropped DLL attributes.
4412 if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
4413 !shouldMapVisibilityToDLLExport(D))
4414 Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
4416 if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
4417 getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);
4419 if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS)
4420 return Entry;
4422 // If there are two attempts to define the same mangled name, issue an
4423 // error.
4424 if (IsForDefinition && !Entry->isDeclaration()) {
4425 GlobalDecl OtherGD;
4426 const VarDecl *OtherD;
4428 // Checking that D is not yet in DiagnosedConflictingDefinitions is required
4429 // to make sure that we issue an error only once.
4430 if (D && lookupRepresentativeDecl(MangledName, OtherGD) &&
4431 (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) &&
4432 (OtherD = dyn_cast<VarDecl>(OtherGD.getDecl())) &&
4433 OtherD->hasInit() &&
4434 DiagnosedConflictingDefinitions.insert(D).second) {
4435 getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
4436 << MangledName;
4437 getDiags().Report(OtherGD.getDecl()->getLocation(),
4438 diag::note_previous_definition);
4442 // Make sure the result is of the correct type.
4443 if (Entry->getType()->getAddressSpace() != TargetAS) {
4444 return llvm::ConstantExpr::getAddrSpaceCast(Entry,
4445 Ty->getPointerTo(TargetAS));
4448 // (If global is requested for a definition, we always need to create a new
4449 // global, not just return a bitcast.)
4450 if (!IsForDefinition)
4451 return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo(TargetAS));
4454 auto DAddrSpace = GetGlobalVarAddressSpace(D);
4456 auto *GV = new llvm::GlobalVariable(
4457 getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr,
4458 MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal,
4459 getContext().getTargetAddressSpace(DAddrSpace));
4461 // If we already created a global with the same mangled name (but different
4462 // type) before, take its name and remove it from its parent.
4463 if (Entry) {
4464 GV->takeName(Entry);
4466 if (!Entry->use_empty()) {
4467 llvm::Constant *NewPtrForOldDecl =
4468 llvm::ConstantExpr::getBitCast(GV, Entry->getType());
4469 Entry->replaceAllUsesWith(NewPtrForOldDecl);
4472 Entry->eraseFromParent();
4475 // This is the first use or definition of a mangled name. If there is a
4476 // deferred decl with this name, remember that we need to emit it at the end
4477 // of the file.
4478 auto DDI = DeferredDecls.find(MangledName);
4479 if (DDI != DeferredDecls.end()) {
4480 // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
4481 // list, and remove it from DeferredDecls (since we don't need it anymore).
4482 addDeferredDeclToEmit(DDI->second);
4483 EmittedDeferredDecls[DDI->first] = DDI->second;
4484 DeferredDecls.erase(DDI);
4487 // Handle things which are present even on external declarations.
4488 if (D) {
4489 if (LangOpts.OpenMP && !LangOpts.OpenMPSimd)
4490 getOpenMPRuntime().registerTargetGlobalVariable(D, GV);
4492 // FIXME: This code is overly simple and should be merged with other global
4493 // handling.
4494 GV->setConstant(isTypeConstant(D->getType(), false, false));
4496 GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
4498 setLinkageForGV(GV, D);
4500 if (D->getTLSKind()) {
4501 if (D->getTLSKind() == VarDecl::TLS_Dynamic)
4502 CXXThreadLocals.push_back(D);
4503 setTLSMode(GV, *D);
4506 setGVProperties(GV, D);
4508 // If required by the ABI, treat declarations of static data members with
4509 // inline initializers as definitions.
4510 if (getContext().isMSStaticDataMemberInlineDefinition(D)) {
4511 EmitGlobalVarDefinition(D);
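// For instance (illustrative sketch): under the Microsoft C++ ABI a
// declaration such as
//   struct S { static const int x = 42; };
// causes S::x to be emitted as a definition here even though no
// out-of-class definition of S::x appears in this translation unit.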
4514 // Emit section information for extern variables.
4515 if (D->hasExternalStorage()) {
4516 if (const SectionAttr *SA = D->getAttr<SectionAttr>())
4517 GV->setSection(SA->getName());
4520 // Handle XCore specific ABI requirements.
4521 if (getTriple().getArch() == llvm::Triple::xcore &&
4522 D->getLanguageLinkage() == CLanguageLinkage &&
4523 D->getType().isConstant(Context) &&
4524 isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
4525 GV->setSection(".cp.rodata");
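// For example (names illustrative): a file-scope
//   extern "C" const int Table[4];
// that is constant and externally visible is placed in ".cp.rodata" on
// XCore, the section reserved for constant-pool data on that target.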
4527 // If we have a const declaration with an initializer, we may be able to
4528 // emit it as available_externally to expose its value to the
4529 // optimizer.
4530 if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() &&
4531 D->getType().isConstQualified() && !GV->hasInitializer() &&
4532 !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) {
4533 const auto *Record =
4534 Context.getBaseElementType(D->getType())->getAsCXXRecordDecl();
4535 bool HasMutableFields = Record && Record->hasMutableFields();
4536 if (!HasMutableFields) {
4537 const VarDecl *InitDecl;
4538 const Expr *InitExpr = D->getAnyInitializer(InitDecl);
4539 if (InitExpr) {
4540 ConstantEmitter emitter(*this);
4541 llvm::Constant *Init = emitter.tryEmitForInitializer(*InitDecl);
4542 if (Init) {
4543 auto *InitType = Init->getType();
4544 if (GV->getValueType() != InitType) {
4545 // The type of the initializer does not match the definition.
4546 // This happens when an initializer has a different type from
4547 // the type of the global (because of padding at the end of a
4548 // structure for instance).
4549 GV->setName(StringRef());
4550 // Make a new global with the correct type, this is now guaranteed
4551 // to work.
4552 auto *NewGV = cast<llvm::GlobalVariable>(
4553 GetAddrOfGlobalVar(D, InitType, IsForDefinition)
4554 ->stripPointerCasts());
4556 // Erase the old global, since it is no longer used.
4557 GV->eraseFromParent();
4558 GV = NewGV;
4559 } else {
4560 GV->setInitializer(Init);
4561 GV->setConstant(true);
4562 GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
4564 emitter.finalize(GV);
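// For example (sketch, names illustrative): if this TU only sees
//   struct A { static const int N = 4; };
// and odr-uses A::N while the defining declaration "const int A::N;" lives
// in another TU, the global can be emitted here as available_externally
// with initializer 4 so the optimizer can fold loads of A::N.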
4571 if (GV->isDeclaration()) {
4572 getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
4573 // External HIP managed variables need to be recorded for transformation
4574 // in both device and host compilations.
4575 if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() &&
4576 D->hasExternalStorage())
4577 getCUDARuntime().handleVarRegistration(D, *GV);
4580 if (D)
4581 SanitizerMD->reportGlobal(GV, *D);
4583 LangAS ExpectedAS =
4584 D ? D->getType().getAddressSpace()
4585 : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
4586 assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS);
4587 if (DAddrSpace != ExpectedAS) {
4588 return getTargetCodeGenInfo().performAddrSpaceCast(
4589 *this, GV, DAddrSpace, ExpectedAS, Ty->getPointerTo(TargetAS));
4592 return GV;
4595 llvm::Constant *
4596 CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
4597 const Decl *D = GD.getDecl();
4599 if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
4600 return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
4601 /*DontDefer=*/false, IsForDefinition);
4603 if (isa<CXXMethodDecl>(D)) {
4604 auto FInfo =
4605 &getTypes().arrangeCXXMethodDeclaration(cast<CXXMethodDecl>(D));
4606 auto Ty = getTypes().GetFunctionType(*FInfo);
4607 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
4608 IsForDefinition);
4611 if (isa<FunctionDecl>(D)) {
4612 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
4613 llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
4614 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
4615 IsForDefinition);
4618 return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr, IsForDefinition);
4621 llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
4622 StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
4623 llvm::Align Alignment) {
4624 llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
4625 llvm::GlobalVariable *OldGV = nullptr;
4627 if (GV) {
4628 // Check if the variable has the right type.
4629 if (GV->getValueType() == Ty)
4630 return GV;
4632 // Because of C++ name mangling, the only way we can end up with an already
4633 // existing global with the same name is if it has been declared extern "C".
4634 assert(GV->isDeclaration() && "Declaration has wrong type!");
4635 OldGV = GV;
4638 // Create a new variable.
4639 GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
4640 Linkage, nullptr, Name);
4642 if (OldGV) {
4643 // Replace occurrences of the old variable if needed.
4644 GV->takeName(OldGV);
4646 if (!OldGV->use_empty()) {
4647 llvm::Constant *NewPtrForOldDecl =
4648 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
4649 OldGV->replaceAllUsesWith(NewPtrForOldDecl);
4652 OldGV->eraseFromParent();
4655 if (supportsCOMDAT() && GV->isWeakForLinker() &&
4656 !GV->hasAvailableExternallyLinkage())
4657 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
4659 GV->setAlignment(Alignment);
4661 return GV;
4664 /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
4665 /// given global variable. If Ty is non-null and if the global doesn't exist,
4666 /// then it will be created with the specified type instead of whatever the
4667 /// normal requested type would be. If IsForDefinition is true, it is guaranteed
4668 /// that an actual global with type Ty will be returned, not conversion of a
4669 /// variable with the same mangled name but some other type.
4670 llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
4671 llvm::Type *Ty,
4672 ForDefinition_t IsForDefinition) {
4673 assert(D->hasGlobalStorage() && "Not a global variable");
4674 QualType ASTTy = D->getType();
4675 if (!Ty)
4676 Ty = getTypes().ConvertTypeForMem(ASTTy);
4678 StringRef MangledName = getMangledName(D);
4679 return GetOrCreateLLVMGlobal(MangledName, Ty, ASTTy.getAddressSpace(), D,
4680 IsForDefinition);
4683 /// CreateRuntimeVariable - Create a new runtime global variable with the
4684 /// specified type and name.
4685 llvm::Constant *
4686 CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
4687 StringRef Name) {
4688 LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global
4689 : LangAS::Default;
4690 auto *Ret = GetOrCreateLLVMGlobal(Name, Ty, AddrSpace, nullptr);
4691 setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
4692 return Ret;
4695 void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
4696 assert(!D->getInit() && "Cannot emit definite definitions here!");
4698 StringRef MangledName = getMangledName(D);
4699 llvm::GlobalValue *GV = GetGlobalValue(MangledName);
4701 // We already have a definition, not a declaration, with the same mangled
4702 // name. Emitting a declaration is not required (and would actually overwrite
4703 // the emitted definition).
4704 if (GV && !GV->isDeclaration())
4705 return;
4707 // If we have not seen a reference to this variable yet, place it into the
4708 // deferred declarations table to be emitted if needed later.
4709 if (!MustBeEmitted(D) && !GV) {
4710 DeferredDecls[MangledName] = D;
4711 return;
4714 // The tentative definition is the only definition.
4715 EmitGlobalVarDefinition(D);
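// For example (name illustrative), a C translation unit whose only mention
// of the variable is the file-scope declaration
//   int counter;        // tentative definition, no initializer
// reaches this point at end-of-TU, and the variable is emitted as the only
// definition with an implicit zero initializer.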
4718 void CodeGenModule::EmitExternalDeclaration(const VarDecl *D) {
4719 EmitExternalVarDeclaration(D);
4722 CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
4723 return Context.toCharUnitsFromBits(
4724 getDataLayout().getTypeStoreSizeInBits(Ty));
4727 LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
4728 if (LangOpts.OpenCL) {
4729 LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
4730 assert(AS == LangAS::opencl_global ||
4731 AS == LangAS::opencl_global_device ||
4732 AS == LangAS::opencl_global_host ||
4733 AS == LangAS::opencl_constant ||
4734 AS == LangAS::opencl_local ||
4735 AS >= LangAS::FirstTargetAddressSpace);
4736 return AS;
4739 if (LangOpts.SYCLIsDevice &&
4740 (!D || D->getType().getAddressSpace() == LangAS::Default))
4741 return LangAS::sycl_global;
4743 if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
4744 if (D) {
4745 if (D->hasAttr<CUDAConstantAttr>())
4746 return LangAS::cuda_constant;
4747 if (D->hasAttr<CUDASharedAttr>())
4748 return LangAS::cuda_shared;
4749 if (D->hasAttr<CUDADeviceAttr>())
4750 return LangAS::cuda_device;
4751 if (D->getType().isConstQualified())
4752 return LangAS::cuda_constant;
4754 return LangAS::cuda_device;
4757 if (LangOpts.OpenMP) {
4758 LangAS AS;
4759 if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(D, AS))
4760 return AS;
4762 return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
4765 LangAS CodeGenModule::GetGlobalConstantAddressSpace() const {
4766 // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
4767 if (LangOpts.OpenCL)
4768 return LangAS::opencl_constant;
4769 if (LangOpts.SYCLIsDevice)
4770 return LangAS::sycl_global;
4771 if (LangOpts.HIP && LangOpts.CUDAIsDevice && getTriple().isSPIRV())
4772 // For HIPSPV map literals to cuda_device (maps to CrossWorkGroup in SPIR-V)
4773 // instead of default AS (maps to Generic in SPIR-V). Otherwise, we end up
4774 // with OpVariable instructions with Generic storage class which is not
4775 // allowed (SPIR-V V1.6 s3.42.8). Also, mapping literals to SPIR-V
4776 // UniformConstant storage class is not viable as pointers to it may not be
4777 // casted to Generic pointers which are used to model HIP's "flat" pointers.
4778 return LangAS::cuda_device;
4779 if (auto AS = getTarget().getConstantAddressSpace())
4780 return *AS;
4781 return LangAS::Default;
4784 // In address space agnostic languages, string literals are in default address
4785 // space in AST. However, certain targets (e.g. amdgcn) request them to be
4786 // emitted in constant address space in LLVM IR. To be consistent with other
4787 // parts of AST, string literal global variables in constant address space
4788 // need to be casted to default address space before being put into address
4789 // map and referenced by other part of CodeGen.
4790 // In OpenCL, string literals are in constant address space in AST, therefore
4791 // they should not be casted to default address space.
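// Sketch of the effect (assuming a target such as amdgcn that requests a
// non-default constant address space): for
//   const char *p = "hi";
// the literal's backing global is created in the constant address space and
// then addrspacecast back to the default address space before other parts
// of CodeGen see its address.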
4792 static llvm::Constant *
4793 castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
4794 llvm::GlobalVariable *GV) {
4795 llvm::Constant *Cast = GV;
4796 if (!CGM.getLangOpts().OpenCL) {
4797 auto AS = CGM.GetGlobalConstantAddressSpace();
4798 if (AS != LangAS::Default)
4799 Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
4800 CGM, GV, AS, LangAS::Default,
4801 GV->getValueType()->getPointerTo(
4802 CGM.getContext().getTargetAddressSpace(LangAS::Default)));
4804 return Cast;
4807 template<typename SomeDecl>
4808 void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
4809 llvm::GlobalValue *GV) {
4810 if (!getLangOpts().CPlusPlus)
4811 return;
4813 // Must have 'used' attribute, or else inline assembly can't rely on
4814 // the name existing.
4815 if (!D->template hasAttr<UsedAttr>())
4816 return;
4818 // Must have internal linkage and an ordinary name.
4819 if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage)
4820 return;
4822 // Must be in an extern "C" context. Entities declared directly within
4823 // a record are not extern "C" even if the record is in such a context.
4824 const SomeDecl *First = D->getFirstDecl();
4825 if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
4826 return;
4828 // OK, this is an internal linkage entity inside an extern "C" linkage
4829 // specification. Make a note of that so we can give it the "expected"
4830 // mangled name if nothing else is using that name.
4831 std::pair<StaticExternCMap::iterator, bool> R =
4832 StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
4834 // If we have multiple internal linkage entities with the same name
4835 // in extern "C" regions, none of them gets that name.
4836 if (!R.second)
4837 R.first->second = nullptr;
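// For example (the name "impl" is arbitrary):
//   extern "C" {
//     __attribute__((used)) static int impl;
//   }
// is recorded here; if nothing else in the module claims the name "impl",
// this entity is later given that unmangled name so inline assembly can
// refer to it.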
4840 static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
4841 if (!CGM.supportsCOMDAT())
4842 return false;
4844 if (D.hasAttr<SelectAnyAttr>())
4845 return true;
4847 GVALinkage Linkage;
4848 if (auto *VD = dyn_cast<VarDecl>(&D))
4849 Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
4850 else
4851 Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
4853 switch (Linkage) {
4854 case GVA_Internal:
4855 case GVA_AvailableExternally:
4856 case GVA_StrongExternal:
4857 return false;
4858 case GVA_DiscardableODR:
4859 case GVA_StrongODR:
4860 return true;
4862 llvm_unreachable("No such linkage");
4865 void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
4866 llvm::GlobalObject &GO) {
4867 if (!shouldBeInCOMDAT(*this, D))
4868 return;
4869 GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
4872 /// Pass IsTentative as true if you want to create a tentative definition.
4873 void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
4874 bool IsTentative) {
4875 // OpenCL global variables of sampler type are translated to function calls,
4876 // therefore they do not need to be emitted here.
4877 QualType ASTTy = D->getType();
4878 if (getLangOpts().OpenCL && ASTTy->isSamplerT())
4879 return;
4881 // If this is OpenMP device, check if it is legal to emit this global
4882 // normally.
4883 if (LangOpts.OpenMPIsDevice && OpenMPRuntime &&
4884 OpenMPRuntime->emitTargetGlobalVariable(D))
4885 return;
4887 llvm::TrackingVH<llvm::Constant> Init;
4888 bool NeedsGlobalCtor = false;
4889 // Whether the definition of the variable is available externally.
4890 // If so, we shouldn't emit the GlobalCtor and GlobalDtor for the variable
4891 // since that is the job of the translation unit that owns the definition.
4892 bool IsDefinitionAvailableExternally =
4893 getContext().GetGVALinkageForVariable(D) == GVA_AvailableExternally;
4894 bool NeedsGlobalDtor =
4895 !IsDefinitionAvailableExternally &&
4896 D->needsDestruction(getContext()) == QualType::DK_cxx_destructor;
4898 const VarDecl *InitDecl;
4899 const Expr *InitExpr = D->getAnyInitializer(InitDecl);
4901 std::optional<ConstantEmitter> emitter;
4903 // CUDA E.2.4.1 "__shared__ variables cannot have an initialization
4904 // as part of their declaration." Sema has already checked for
4905 // error cases, so we just need to set Init to UndefValue.
4906 bool IsCUDASharedVar =
4907 getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
4908 // Shadows of initialized device-side global variables are also left
4909 // undefined.
4910 // Managed Variables should be initialized on both host side and device side.
4911 bool IsCUDAShadowVar =
4912 !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
4913 (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
4914 D->hasAttr<CUDASharedAttr>());
4915 bool IsCUDADeviceShadowVar =
4916 getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
4917 (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
4918 D->getType()->isCUDADeviceBuiltinTextureType());
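// For example (name illustrative), when compiling for the device,
//   __shared__ float tile[256];
// gets an undef initializer here (Sema has already rejected any explicit
// initializer), and on the host side the shadow of an initialized
// __device__ variable is likewise left undefined.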
4919 if (getLangOpts().CUDA &&
4920 (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
4921 Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
4922 else if (D->hasAttr<LoaderUninitializedAttr>())
4923 Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
4924 else if (!InitExpr) {
4925 // This is a tentative definition; tentative definitions are
4926 // implicitly initialized with { 0 }.
4928 // Note that tentative definitions are only emitted at the end of
4929 // a translation unit, so they should never have incomplete
4930 // type. In addition, EmitTentativeDefinition makes sure that we
4931 // never attempt to emit a tentative definition if a real one
4932 // exists. A use may still exist, however, so we still may need
4933 // to do a RAUW.
4934 assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
4935 Init = EmitNullConstant(D->getType());
4936 } else {
4937 initializedGlobalDecl = GlobalDecl(D);
4938 emitter.emplace(*this);
4939 llvm::Constant *Initializer = emitter->tryEmitForInitializer(*InitDecl);
4940 if (!Initializer) {
4941 QualType T = InitExpr->getType();
4942 if (D->getType()->isReferenceType())
4943 T = D->getType();
4945 if (getLangOpts().CPlusPlus) {
4946 if (InitDecl->hasFlexibleArrayInit(getContext()))
4947 ErrorUnsupported(D, "flexible array initializer");
4948 Init = EmitNullConstant(T);
4950 if (!IsDefinitionAvailableExternally)
4951 NeedsGlobalCtor = true;
4952 } else {
4953 ErrorUnsupported(D, "static initializer");
4954 Init = llvm::UndefValue::get(getTypes().ConvertType(T));
4956 } else {
4957 Init = Initializer;
4958 // We have a constant initializer, so no dynamic initializer is needed;
4959 // remove the entry for the delayed initializer position (in case this
4960 // entry was delayed) if we also don't need to register a destructor.
4961 if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
4962 DelayedCXXInitPosition.erase(D);
4964 #ifndef NDEBUG
4965 CharUnits VarSize = getContext().getTypeSizeInChars(ASTTy) +
4966 InitDecl->getFlexibleArrayInitChars(getContext());
4967 CharUnits CstSize = CharUnits::fromQuantity(
4968 getDataLayout().getTypeAllocSize(Init->getType()));
4969 assert(VarSize == CstSize && "Emitted constant has unexpected size");
4970 #endif
4974 llvm::Type* InitType = Init->getType();
4975 llvm::Constant *Entry =
4976 GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative));
4978 // Strip off pointer casts if we got them.
4979 Entry = Entry->stripPointerCasts();
4981 // Entry is now either a Function or GlobalVariable.
4982 auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);
4984 // We have a definition after a declaration with the wrong type.
4985 // We must make a new GlobalVariable* and update everything that used OldGV
4986 // (a declaration or tentative definition) with the new GlobalVariable*
4987 // (which will be a definition).
4989 // This happens if there is a prototype for a global (e.g.
4990 // "extern int x[];") and then a definition of a different type (e.g.
4991 // "int x[10];"). This also happens when an initializer has a different type
4992 // from the type of the global (this happens with unions).
4993 if (!GV || GV->getValueType() != InitType ||
4994 GV->getType()->getAddressSpace() !=
4995 getContext().getTargetAddressSpace(GetGlobalVarAddressSpace(D))) {
4997 // Move the old entry aside so that we'll create a new one.
4998 Entry->setName(StringRef());
5000 // Make a new global with the correct type, this is now guaranteed to work.
5001 GV = cast<llvm::GlobalVariable>(
5002 GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative))
5003 ->stripPointerCasts());
5005 // Replace all uses of the old global with the new global
5006 llvm::Constant *NewPtrForOldDecl =
5007 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
5008 Entry->getType());
5009 Entry->replaceAllUsesWith(NewPtrForOldDecl);
5011 // Erase the old global, since it is no longer used.
5012 cast<llvm::GlobalValue>(Entry)->eraseFromParent();
5015 MaybeHandleStaticInExternC(D, GV);
5017 if (D->hasAttr<AnnotateAttr>())
5018 AddGlobalAnnotations(D, GV);
5020 // Set the llvm linkage type as appropriate.
5021 llvm::GlobalValue::LinkageTypes Linkage =
5022 getLLVMLinkageVarDefinition(D, GV->isConstant());
5024 // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
5025 // the device. [...]"
5026 // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
5027 // __device__, declares a variable that: [...]
5028 // Is accessible from all the threads within the grid and from the host
5029 // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
5030 // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
5031 if (GV && LangOpts.CUDA) {
5032 if (LangOpts.CUDAIsDevice) {
5033 if (Linkage != llvm::GlobalValue::InternalLinkage &&
5034 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
5035 D->getType()->isCUDADeviceBuiltinSurfaceType() ||
5036 D->getType()->isCUDADeviceBuiltinTextureType()))
5037 GV->setExternallyInitialized(true);
5038 } else {
5039 getCUDARuntime().internalizeDeviceSideVar(D, Linkage);
5041 getCUDARuntime().handleVarRegistration(D, *GV);
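// For example (name illustrative), a namespace-scope
//   __device__ int counter = 0;
// is marked externally_initialized in the device compilation: the host may
// overwrite its value through cudaMemcpyToSymbol() before any kernel runs,
// so the optimizer must not assume the zero initializer still holds.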
5044 GV->setInitializer(Init);
5045 if (emitter)
5046 emitter->finalize(GV);
5048 // If it is safe to mark the global 'constant', do so now.
5049 GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
5050 isTypeConstant(D->getType(), true, true));
5052 // If it is in a read-only section, mark it 'constant'.
5053 if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
5054 const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
5055 if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
5056 GV->setConstant(true);
5059 CharUnits AlignVal = getContext().getDeclAlign(D);
5060 // Check for alignment specified in an 'omp allocate' directive.
5061 if (std::optional<CharUnits> AlignValFromAllocate =
5062 getOMPAllocateAlignment(D))
5063 AlignVal = *AlignValFromAllocate;
5064 GV->setAlignment(AlignVal.getAsAlign());
5066 // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
5067 // function is only defined alongside the variable, not also alongside
5068 // callers. Normally, all accesses to a thread_local go through the
5069 // thread-wrapper in order to ensure initialization has occurred, so the
5070 // underlying variable will never be used other than through the
5071 // thread-wrapper and can therefore be converted to internal linkage.
5073 // However, if the variable has the 'constinit' attribute, it _can_ be
5074 // referenced directly, without calling the thread-wrapper, so the linkage
5075 // must not be changed.
5077 // Additionally, if the variable isn't plain external linkage, e.g. if it's
5078 // weak or linkonce, the de-duplication semantics are important to preserve,
5079 // so we don't change the linkage.
5080 if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
5081 Linkage == llvm::GlobalValue::ExternalLinkage &&
5082 Context.getTargetInfo().getTriple().isOSDarwin() &&
5083 !D->hasAttr<ConstInitAttr>())
5084 Linkage = llvm::GlobalValue::InternalLinkage;
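// Illustrative example (initializers arbitrary):
//   thread_local int t = compute();      // only reachable via the wrapper,
//                                        // may be demoted to internal
//   constinit thread_local int t2 = 0;   // may be referenced directly,
//                                        // keeps external linkage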
5086 GV->setLinkage(Linkage);
5087 if (D->hasAttr<DLLImportAttr>())
5088 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
5089 else if (D->hasAttr<DLLExportAttr>())
5090 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
5091 else
5092 GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
5094 if (Linkage == llvm::GlobalVariable::CommonLinkage) {
5095 // common vars aren't constant even if declared const.
5096 GV->setConstant(false);
5097 // Tentative definitions of global variables may be initialized with
5098 // non-zero null pointers. In this case they should have weak linkage,
5099 // since a common-linkage global must have a zero initializer and must not
5100 // have an explicit section, and therefore cannot have a non-zero initial value.
5101 if (!GV->getInitializer()->isNullValue())
5102 GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
5105 setNonAliasAttributes(D, GV);
5107 if (D->getTLSKind() && !GV->isThreadLocal()) {
5108 if (D->getTLSKind() == VarDecl::TLS_Dynamic)
5109 CXXThreadLocals.push_back(D);
5110 setTLSMode(GV, *D);
5113 maybeSetTrivialComdat(*D, *GV);
5115 // Emit the initializer function if necessary.
5116 if (NeedsGlobalCtor || NeedsGlobalDtor)
5117 EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
5119 SanitizerMD->reportGlobal(GV, *D, NeedsGlobalCtor);
5121 // Emit global variable debug information.
5122 if (CGDebugInfo *DI = getModuleDebugInfo())
5123 if (getCodeGenOpts().hasReducedDebugInfo())
5124 DI->EmitGlobalVariable(GV, D);
5127 void CodeGenModule::EmitExternalVarDeclaration(const VarDecl *D) {
5128 if (CGDebugInfo *DI = getModuleDebugInfo())
5129 if (getCodeGenOpts().hasReducedDebugInfo()) {
5130 QualType ASTTy = D->getType();
5131 llvm::Type *Ty = getTypes().ConvertTypeForMem(D->getType());
5132 llvm::Constant *GV =
5133 GetOrCreateLLVMGlobal(D->getName(), Ty, ASTTy.getAddressSpace(), D);
5134 DI->EmitExternalVariable(
5135 cast<llvm::GlobalVariable>(GV->stripPointerCasts()), D);
5139 static bool isVarDeclStrongDefinition(const ASTContext &Context,
5140 CodeGenModule &CGM, const VarDecl *D,
5141 bool NoCommon) {
5142 // Don't give variables common linkage if -fno-common was specified unless it
5143 // was overridden by a NoCommon attribute.
5144 if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
5145 return true;
5147 // C11 6.9.2/2:
5148 // A declaration of an identifier for an object that has file scope without
5149 // an initializer, and without a storage-class specifier or with the
5150 // storage-class specifier static, constitutes a tentative definition.
5151 if (D->getInit() || D->hasExternalStorage())
5152 return true;
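// For example (name illustrative), with -fcommon two C translation units
// may each contain
//   int shared_counter;     // tentative definition
// and the linker merges them into one common symbol; giving either
// declaration an initializer turns it into a strong definition instead.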
5154 // A variable cannot be both common and exist in a section.
5155 if (D->hasAttr<SectionAttr>())
5156 return true;
5158 // Likewise, a variable placed in a section via '#pragma clang section'
5159 // cannot be common. We don't try to determine the right section in the
5160 // front-end; if no specialized section name applies, the default is used.
5161 if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
5162 D->hasAttr<PragmaClangDataSectionAttr>() ||
5163 D->hasAttr<PragmaClangRelroSectionAttr>() ||
5164 D->hasAttr<PragmaClangRodataSectionAttr>())
5165 return true;
5167 // Thread local vars aren't considered common linkage.
5168 if (D->getTLSKind())
5169 return true;
5171 // Tentative definitions marked with WeakImportAttr are true definitions.
5172 if (D->hasAttr<WeakImportAttr>())
5173 return true;
5175 // A variable cannot be both common and exist in a comdat.
5176 if (shouldBeInCOMDAT(CGM, *D))
5177 return true;
5179 // Declarations with a required alignment do not have common linkage in MSVC
5180 // mode.
5181 if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
5182 if (D->hasAttr<AlignedAttr>())
5183 return true;
5184 QualType VarType = D->getType();
5185 if (Context.isAlignmentRequired(VarType))
5186 return true;
5188 if (const auto *RT = VarType->getAs<RecordType>()) {
5189 const RecordDecl *RD = RT->getDecl();
5190 for (const FieldDecl *FD : RD->fields()) {
5191 if (FD->isBitField())
5192 continue;
5193 if (FD->hasAttr<AlignedAttr>())
5194 return true;
5195 if (Context.isAlignmentRequired(FD->getType()))
5196 return true;
5201 // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
5202 // common symbols, so symbols with greater alignment requirements cannot be
5203 // common.
5204 // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
5205 // alignments for common symbols via the aligncomm directive, so this
5206 // restriction only applies to MSVC environments.
5207 if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
5208 Context.getTypeAlignIfKnown(D->getType()) >
5209 Context.toBits(CharUnits::fromQuantity(32)))
5210 return true;
5212 return false;
5215 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
5216 const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
5217 if (Linkage == GVA_Internal)
5218 return llvm::Function::InternalLinkage;
5220 if (D->hasAttr<WeakAttr>())
5221 return llvm::GlobalVariable::WeakAnyLinkage;
5223 if (const auto *FD = D->getAsFunction())
5224 if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
5225 return llvm::GlobalVariable::LinkOnceAnyLinkage;
5227 // We are guaranteed to have a strong definition somewhere else,
5228 // so we can use available_externally linkage.
5229 if (Linkage == GVA_AvailableExternally)
5230 return llvm::GlobalValue::AvailableExternallyLinkage;
5232 // Note that Apple's kernel linker doesn't support symbol
5233 // coalescing, so we need to avoid linkonce and weak linkages there.
5234 // Normally, this means we just map to internal, but for explicit
5235 // instantiations we'll map to external.
5237 // In C++, the compiler has to emit a definition in every translation unit
5238 // that references the function. We should use linkonce_odr because
5239 // a) if all references in this translation unit are optimized away, we
5240 // don't need to codegen it. b) if the function persists, it needs to be
5241 // merged with other definitions. c) C++ has the ODR, so we know the
5242 // definition is dependable.
5243 if (Linkage == GVA_DiscardableODR)
5244 return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
5245 : llvm::Function::InternalLinkage;
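// For example (name arbitrary), a header-defined
//   inline int twice(int x) { return 2 * x; }
// is emitted with linkonce_odr linkage in every TU that uses it, so unused
// copies are discarded and the remaining copies are merged at link time.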
5247 // An explicit instantiation of a template has weak linkage, since
5248 // explicit instantiations can occur in multiple translation units
5249 // and must all be equivalent. However, we are not allowed to
5250 // throw away these explicit instantiations.
5252 // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU,
5253 // so say that CUDA templates are either external (for kernels) or internal.
5254 // This lets llvm perform aggressive inter-procedural optimizations. For
5255 // -fgpu-rdc case, device function calls across multiple TU's are allowed,
5256 // therefore we need to follow the normal linkage paradigm.
5257 if (Linkage == GVA_StrongODR) {
5258 if (getLangOpts().AppleKext)
5259 return llvm::Function::ExternalLinkage;
5260 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
5261 !getLangOpts().GPURelocatableDeviceCode)
5262 return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage
5263 : llvm::Function::InternalLinkage;
5264 return llvm::Function::WeakODRLinkage;
5267 // C++ doesn't have tentative definitions and thus cannot have common
5268 // linkage.
5269 if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
5270 !isVarDeclStrongDefinition(Context, *this, cast<VarDecl>(D),
5271 CodeGenOpts.NoCommon))
5272 return llvm::GlobalVariable::CommonLinkage;
5274 // selectany symbols are externally visible, so use weak instead of
5275 // linkonce. MSVC optimizes away references to const selectany globals, so
5276 // all definitions should be the same and ODR linkage should be used.
5277 // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
5278 if (D->hasAttr<SelectAnyAttr>())
5279 return llvm::GlobalVariable::WeakODRLinkage;
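// For example (name arbitrary),
//   __declspec(selectany) int Flag = 1;
// is emitted weak_odr: still externally visible, with duplicate definitions
// across TUs folded by the linker.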
5281 // Otherwise, we have strong external linkage.
5282 assert(Linkage == GVA_StrongExternal);
5283 return llvm::GlobalVariable::ExternalLinkage;
5286 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
5287 const VarDecl *VD, bool IsConstant) {
5288 GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
5289 return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
5292 /// Replace the uses of a function that was declared with a non-proto type.
5293 /// We want to silently drop extra arguments from call sites.
5294 static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
5295 llvm::Function *newFn) {
5296 // Fast path.
5297 if (old->use_empty()) return;
5299 llvm::Type *newRetTy = newFn->getReturnType();
5300 SmallVector<llvm::Value*, 4> newArgs;
5302 for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
5303 ui != ue; ) {
5304 llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
5305 llvm::User *user = use->getUser();
5307 // Recognize and replace uses of bitcasts. Most calls to
5308 // unprototyped functions will use bitcasts.
5309 if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(user)) {
5310 if (bitcast->getOpcode() == llvm::Instruction::BitCast)
5311 replaceUsesOfNonProtoConstant(bitcast, newFn);
5312 continue;
5315 // Recognize calls to the function.
5316 llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
5317 if (!callSite) continue;
5318 if (!callSite->isCallee(&*use))
5319 continue;
5321 // If the return types don't match exactly, then we can't
5322 // transform this call unless it's dead.
5323 if (callSite->getType() != newRetTy && !callSite->use_empty())
5324 continue;
5326 // Get the call site's attribute list.
5327 SmallVector<llvm::AttributeSet, 8> newArgAttrs;
5328 llvm::AttributeList oldAttrs = callSite->getAttributes();
5330 // If the function was passed too few arguments, don't transform.
5331 unsigned newNumArgs = newFn->arg_size();
5332 if (callSite->arg_size() < newNumArgs)
5333 continue;
5335 // If extra arguments were passed, we silently drop them.
5336 // If any of the types mismatch, we don't transform.
5337 unsigned argNo = 0;
5338 bool dontTransform = false;
5339 for (llvm::Argument &A : newFn->args()) {
5340 if (callSite->getArgOperand(argNo)->getType() != A.getType()) {
5341 dontTransform = true;
5342 break;
5345 // Add any parameter attributes.
5346 newArgAttrs.push_back(oldAttrs.getParamAttrs(argNo));
5347 argNo++;
5349 if (dontTransform)
5350 continue;
5352 // Okay, we can transform this. Create the new call instruction and copy
5353 // over the required information.
5354 newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);
5356 // Copy over any operand bundles.
5357 SmallVector<llvm::OperandBundleDef, 1> newBundles;
5358 callSite->getOperandBundlesAsDefs(newBundles);
5360 llvm::CallBase *newCall;
5361 if (isa<llvm::CallInst>(callSite)) {
5362 newCall =
5363 llvm::CallInst::Create(newFn, newArgs, newBundles, "", callSite);
5364 } else {
5365 auto *oldInvoke = cast<llvm::InvokeInst>(callSite);
5366 newCall = llvm::InvokeInst::Create(newFn, oldInvoke->getNormalDest(),
5367 oldInvoke->getUnwindDest(), newArgs,
5368 newBundles, "", callSite);
5370 newArgs.clear(); // for the next iteration
5372 if (!newCall->getType()->isVoidTy())
5373 newCall->takeName(callSite);
5374 newCall->setAttributes(
5375 llvm::AttributeList::get(newFn->getContext(), oldAttrs.getFnAttrs(),
5376 oldAttrs.getRetAttrs(), newArgAttrs));
5377 newCall->setCallingConv(callSite->getCallingConv());
5379 // Finally, remove the old call, replacing any uses with the new one.
5380 if (!callSite->use_empty())
5381 callSite->replaceAllUsesWith(newCall);
5383 // Copy debug location attached to CI.
5384 if (callSite->getDebugLoc())
5385 newCall->setDebugLoc(callSite->getDebugLoc());
5387 callSite->eraseFromParent();
5391 /// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
5392 /// implement a function with no prototype, e.g. "int foo() {}". If there are
5393 /// existing call uses of the old function in the module, this adjusts them to
5394 /// call the new function directly.
5396 /// This is not just a cleanup: the always_inline pass requires direct calls to
5397 /// functions to be able to inline them. If there is a bitcast in the way, it
5398 /// won't inline them. Instcombine normally deletes these calls, but it isn't
5399 /// run at -O0.
5400 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
5401 llvm::Function *NewFn) {
5402 // If we're redefining a global as a function, don't transform it.
5403 if (!isa<llvm::Function>(Old)) return;
5405 replaceUsesOfNonProtoConstant(Old, NewFn);
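// Rough example of the situation handled above, in C (names illustrative):
//   int foo();                      // no prototype
//   void bar(void) { foo(1, 2); }   // call through the unprototyped decl
//   int foo() { return 0; }         // real definition, takes no arguments
// Once the definition is emitted, the call in bar is rewritten to call it
// directly and the extra arguments are silently dropped.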
5408 void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
5409 auto DK = VD->isThisDeclarationADefinition();
5410 if (DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>())
5411 return;
5413 TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
5414 // If we have a definition, this might be a deferred decl. If the
5415 // instantiation is explicit, make sure we emit it at the end.
5416 if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
5417 GetAddrOfGlobalVar(VD);
5419 EmitTopLevelDecl(VD);
5422 void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
5423 llvm::GlobalValue *GV) {
5424 const auto *D = cast<FunctionDecl>(GD.getDecl());
5426 // Compute the function info and LLVM type.
5427 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
5428 llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
5430 // Get or create the prototype for the function.
5431 if (!GV || (GV->getValueType() != Ty))
5432 GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
5433 /*DontDefer=*/true,
5434 ForDefinition));
5436 // Already emitted.
5437 if (!GV->isDeclaration())
5438 return;
5440 // We need to set linkage and visibility on the function before
5441 // generating code for it because various parts of IR generation
5442 // want to propagate this information down (e.g. to local static
5443 // declarations).
5444 auto *Fn = cast<llvm::Function>(GV);
5445 setFunctionLinkage(GD, Fn);
5447 // FIXME: this is redundant with part of setFunctionDefinitionAttributes
5448 setGVProperties(Fn, GD);
5450 MaybeHandleStaticInExternC(D, Fn);
5452 maybeSetTrivialComdat(*D, *Fn);
5454 // Set CodeGen attributes that represent floating point environment.
5455 setLLVMFunctionFEnvAttributes(D, Fn);
5457 CodeGenFunction(*this).GenerateCode(GD, Fn, FI);
5459 setNonAliasAttributes(GD, Fn);
5460 SetLLVMFunctionAttributesForDefinition(D, Fn);
5462 if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
5463 AddGlobalCtor(Fn, CA->getPriority());
5464 if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
5465 AddGlobalDtor(Fn, DA->getPriority(), true);
5466 if (D->hasAttr<AnnotateAttr>())
5467 AddGlobalAnnotations(D, Fn);
5470 void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
5471 const auto *D = cast<ValueDecl>(GD.getDecl());
5472 const AliasAttr *AA = D->getAttr<AliasAttr>();
5473 assert(AA && "Not an alias?");
5475 StringRef MangledName = getMangledName(GD);
5477 if (AA->getAliasee() == MangledName) {
5478 Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
5479 return;
5482 // If there is a definition in the module, then it wins over the alias.
5483 // This is dubious, but allow it to be safe. Just ignore the alias.
5484 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
5485 if (Entry && !Entry->isDeclaration())
5486 return;
5488 Aliases.push_back(GD);
5490 llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
5492 // Create a reference to the named value. This ensures that it is emitted
5493 // if a deferred decl.
5494 llvm::Constant *Aliasee;
5495 llvm::GlobalValue::LinkageTypes LT;
5496 if (isa<llvm::FunctionType>(DeclTy)) {
5497 Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
5498 /*ForVTable=*/false);
5499 LT = getFunctionLinkage(GD);
5500 } else {
5501 Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
5502 /*D=*/nullptr);
5503 if (const auto *VD = dyn_cast<VarDecl>(GD.getDecl()))
5504 LT = getLLVMLinkageVarDefinition(VD, D->getType().isConstQualified());
5505 else
5506 LT = getFunctionLinkage(GD);
5509 // Create the new alias itself, but don't set a name yet.
5510 unsigned AS = Aliasee->getType()->getPointerAddressSpace();
5511 auto *GA =
5512 llvm::GlobalAlias::create(DeclTy, AS, LT, "", Aliasee, &getModule());
5514 if (Entry) {
5515 if (GA->getAliasee() == Entry) {
5516 Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
5517 return;
5520 assert(Entry->isDeclaration());
5522 // If there is a declaration in the module, then we had an extern followed
5523 // by the alias, as in:
5524 // extern int test6();
5525 // ...
5526 // int test6() __attribute__((alias("test7")));
5528 // Remove it and replace uses of it with the alias.
5529 GA->takeName(Entry);
5531 Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
5532 Entry->getType()));
5533 Entry->eraseFromParent();
5534 } else {
5535 GA->setName(MangledName);
5538 // Set attributes which are particular to an alias; this is a
5539 // specialization of the attributes which may be set on a global
5540 // variable/function.
5541 if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
5542 D->isWeakImported()) {
5543 GA->setLinkage(llvm::Function::WeakAnyLinkage);
5546 if (const auto *VD = dyn_cast<VarDecl>(D))
5547 if (VD->getTLSKind())
5548 setTLSMode(GA, *VD);
5550 SetCommonAttributes(GD, GA);
5552 // Emit global alias debug information.
5553 if (isa<VarDecl>(D))
5554 if (CGDebugInfo *DI = getModuleDebugInfo())
5555 DI->EmitGlobalAlias(cast<llvm::GlobalValue>(GA->getAliasee()->stripPointerCasts()), GD);
5558 void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
5559 const auto *D = cast<ValueDecl>(GD.getDecl());
5560 const IFuncAttr *IFA = D->getAttr<IFuncAttr>();
5561 assert(IFA && "Not an ifunc?");
5563 StringRef MangledName = getMangledName(GD);
5565 if (IFA->getResolver() == MangledName) {
5566 Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
5567 return;
5570 // Report an error if some definition overrides ifunc.
5571 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
5572 if (Entry && !Entry->isDeclaration()) {
5573 GlobalDecl OtherGD;
5574 if (lookupRepresentativeDecl(MangledName, OtherGD) &&
5575 DiagnosedConflictingDefinitions.insert(GD).second) {
5576 Diags.Report(D->getLocation(), diag::err_duplicate_mangled_name)
5577 << MangledName;
5578 Diags.Report(OtherGD.getDecl()->getLocation(),
5579 diag::note_previous_definition);
5581 return;
5584 Aliases.push_back(GD);
5586 llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
5587 llvm::Type *ResolverTy = llvm::GlobalIFunc::getResolverFunctionType(DeclTy);
5588 llvm::Constant *Resolver =
5589 GetOrCreateLLVMFunction(IFA->getResolver(), ResolverTy, {},
5590 /*ForVTable=*/false);
5591 llvm::GlobalIFunc *GIF =
5592 llvm::GlobalIFunc::create(DeclTy, 0, llvm::Function::ExternalLinkage,
5593 "", Resolver, &getModule());
5594 if (Entry) {
5595 if (GIF->getResolver() == Entry) {
5596 Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
5597 return;
5599 assert(Entry->isDeclaration());
5601 // If there is a declaration in the module, then we had an extern followed
5602 // by the ifunc, as in:
5603 // extern int test();
5604 // ...
5605 // int test() __attribute__((ifunc("resolver")));
5607 // Remove it and replace uses of it with the ifunc.
5608 GIF->takeName(Entry);
5610 Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GIF,
5611 Entry->getType()));
5612 Entry->eraseFromParent();
5613 } else
5614 GIF->setName(MangledName);
5616 SetCommonAttributes(GD, GIF);
5619 llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
5620 ArrayRef<llvm::Type*> Tys) {
5621 return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
5622 Tys);
5625 static llvm::StringMapEntry<llvm::GlobalVariable *> &
5626 GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
5627 const StringLiteral *Literal, bool TargetIsLSB,
5628 bool &IsUTF16, unsigned &StringLength) {
5629 StringRef String = Literal->getString();
5630 unsigned NumBytes = String.size();
5632 // Check for simple case.
5633 if (!Literal->containsNonAsciiOrNull()) {
5634 StringLength = NumBytes;
5635 return *Map.insert(std::make_pair(String, nullptr)).first;
5638 // Otherwise, convert the UTF8 literals into a string of shorts.
5639 IsUTF16 = true;
5641 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
5642 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5643 llvm::UTF16 *ToPtr = &ToBuf[0];
5645 (void)llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5646 ToPtr + NumBytes, llvm::strictConversion);
5648 // ConvertUTF8toUTF16 returns the length in ToPtr.
5649 StringLength = ToPtr - &ToBuf[0];
5651 // Add an explicit null.
5652 *ToPtr = 0;
5653 return *Map.insert(std::make_pair(
5654 StringRef(reinterpret_cast<const char *>(ToBuf.data()),
5655 (StringLength + 1) * 2),
5656 nullptr)).first;
5659 ConstantAddress
5660 CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
5661 unsigned StringLength = 0;
5662 bool isUTF16 = false;
5663 llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
5664 GetConstantCFStringEntry(CFConstantStringMap, Literal,
5665 getDataLayout().isLittleEndian(), isUTF16,
5666 StringLength);
5668 if (auto *C = Entry.second)
5669 return ConstantAddress(
5670 C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
5672 llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
5673 llvm::Constant *Zeros[] = { Zero, Zero };
5675 const ASTContext &Context = getContext();
5676 const llvm::Triple &Triple = getTriple();
5678 const auto CFRuntime = getLangOpts().CFRuntime;
5679 const bool IsSwiftABI =
5680 static_cast<unsigned>(CFRuntime) >=
5681 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
5682 const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;
5684 // If we don't already have it, get __CFConstantStringClassReference.
5685 if (!CFConstantStringClassRef) {
5686 const char *CFConstantStringClassName = "__CFConstantStringClassReference";
5687 llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
5688 Ty = llvm::ArrayType::get(Ty, 0);
5690 switch (CFRuntime) {
5691 default: break;
5692 case LangOptions::CoreFoundationABI::Swift: [[fallthrough]];
5693 case LangOptions::CoreFoundationABI::Swift5_0:
5694 CFConstantStringClassName =
5695 Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
5696 : "$s10Foundation19_NSCFConstantStringCN";
5697 Ty = IntPtrTy;
5698 break;
5699 case LangOptions::CoreFoundationABI::Swift4_2:
5700 CFConstantStringClassName =
5701 Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
5702 : "$S10Foundation19_NSCFConstantStringCN";
5703 Ty = IntPtrTy;
5704 break;
5705 case LangOptions::CoreFoundationABI::Swift4_1:
5706 CFConstantStringClassName =
5707 Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
5708 : "__T010Foundation19_NSCFConstantStringCN";
5709 Ty = IntPtrTy;
5710 break;
5713 llvm::Constant *C = CreateRuntimeVariable(Ty, CFConstantStringClassName);
5715 if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
5716 llvm::GlobalValue *GV = nullptr;
5718 if ((GV = dyn_cast<llvm::GlobalValue>(C))) {
5719 IdentifierInfo &II = Context.Idents.get(GV->getName());
5720 TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
5721 DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
5723 const VarDecl *VD = nullptr;
5724 for (const auto *Result : DC->lookup(&II))
5725 if ((VD = dyn_cast<VarDecl>(Result)))
5726 break;
5728 if (Triple.isOSBinFormatELF()) {
5729 if (!VD)
5730 GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
5731 } else {
5732 GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
5733 if (!VD || !VD->hasAttr<DLLExportAttr>())
5734 GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
5735 else
5736 GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
5739 setDSOLocal(GV);
5743 // Decay array -> ptr
5744 CFConstantStringClassRef =
5745 IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty)
5746 : llvm::ConstantExpr::getGetElementPtr(Ty, C, Zeros);
5749 QualType CFTy = Context.getCFConstantStringType();
5751 auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));
5753 ConstantInitBuilder Builder(*this);
5754 auto Fields = Builder.beginStruct(STy);
5756 // Class pointer.
5757 Fields.add(cast<llvm::Constant>(CFConstantStringClassRef));
5759 // Flags.
5760 if (IsSwiftABI) {
5761 Fields.addInt(IntPtrTy, IsSwift4_1 ? 0x05 : 0x01);
5762 Fields.addInt(Int64Ty, isUTF16 ? 0x07d0 : 0x07c8);
5763 } else {
5764 Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
5767 // String pointer.
5768 llvm::Constant *C = nullptr;
5769 if (isUTF16) {
5770 auto Arr = llvm::ArrayRef(
5771 reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
5772 Entry.first().size() / 2);
5773 C = llvm::ConstantDataArray::get(VMContext, Arr);
5774 } else {
5775 C = llvm::ConstantDataArray::getString(VMContext, Entry.first());
5778 // Note: -fwritable-strings doesn't make the backing store strings of
5779 // CFStrings writable. (See <rdar://problem/10657500>)
5780 auto *GV =
5781 new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
5782 llvm::GlobalValue::PrivateLinkage, C, ".str");
5783 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
5784 // Don't enforce the target's minimum global alignment, since the only use
5785 // of the string is via this class initializer.
5786 CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(Context.ShortTy)
5787 : Context.getTypeAlignInChars(Context.CharTy);
5788 GV->setAlignment(Align.getAsAlign());
5790 // FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
5791 // Without it LLVM can merge the string with a non unnamed_addr one during
5792 // LTO. Doing that changes the section it ends in, which surprises ld64.
5793 if (Triple.isOSBinFormatMachO())
5794 GV->setSection(isUTF16 ? "__TEXT,__ustring"
5795 : "__TEXT,__cstring,cstring_literals");
5796 // Make sure the literal ends up in .rodata to allow for safe ICF and for
5797 // the static linker to adjust permissions to read-only later on.
5798 else if (Triple.isOSBinFormatELF())
5799 GV->setSection(".rodata");
5801 // String.
5802 llvm::Constant *Str =
5803 llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
5805 if (isUTF16)
5806 // Cast the UTF16 string to the correct type.
5807 Str = llvm::ConstantExpr::getBitCast(Str, Int8PtrTy);
5808 Fields.add(Str);
5810 // String length.
5811 llvm::IntegerType *LengthTy =
5812 llvm::IntegerType::get(getModule().getContext(),
5813 Context.getTargetInfo().getLongWidth());
5814 if (IsSwiftABI) {
5815 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
5816 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
5817 LengthTy = Int32Ty;
5818 else
5819 LengthTy = IntPtrTy;
5821 Fields.addInt(LengthTy, StringLength);
5823 // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t) is
5824 // properly aligned on 32-bit platforms.
5825 CharUnits Alignment =
5826 IsSwiftABI ? Context.toCharUnitsFromBits(64) : getPointerAlign();
5828 // The struct.
5829 GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
5830 /*isConstant=*/false,
5831 llvm::GlobalVariable::PrivateLinkage);
5832 GV->addAttribute("objc_arc_inert");
5833 switch (Triple.getObjectFormat()) {
5834 case llvm::Triple::UnknownObjectFormat:
5835 llvm_unreachable("unknown file format");
5836 case llvm::Triple::DXContainer:
5837 case llvm::Triple::GOFF:
5838 case llvm::Triple::SPIRV:
5839 case llvm::Triple::XCOFF:
5840 llvm_unreachable("unimplemented");
5841 case llvm::Triple::COFF:
5842 case llvm::Triple::ELF:
5843 case llvm::Triple::Wasm:
5844 GV->setSection("cfstring");
5845 break;
5846 case llvm::Triple::MachO:
5847 GV->setSection("__DATA,__cfstring");
5848 break;
5850 Entry.second = GV;
5852 return ConstantAddress(GV, GV->getValueType(), Alignment);
5855 bool CodeGenModule::getExpressionLocationsEnabled() const {
5856 return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo;
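// The record built below mirrors the Objective-C runtime's
// NSFastEnumerationState (shown here for reference; field names follow the
// runtime headers):
//   typedef struct {
//     unsigned long state;
//     id *itemsPtr;
//     unsigned long *mutationsPtr;
//     unsigned long extra[5];
//   } NSFastEnumerationState;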
5859 QualType CodeGenModule::getObjCFastEnumerationStateType() {
5860 if (ObjCFastEnumerationStateType.isNull()) {
5861 RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState");
5862 D->startDefinition();
5864 QualType FieldTypes[] = {
5865 Context.UnsignedLongTy,
5866 Context.getPointerType(Context.getObjCIdType()),
5867 Context.getPointerType(Context.UnsignedLongTy),
5868 Context.getConstantArrayType(Context.UnsignedLongTy,
5869 llvm::APInt(32, 5), nullptr, ArrayType::Normal, 0)
5872 for (size_t i = 0; i < 4; ++i) {
5873 FieldDecl *Field = FieldDecl::Create(Context,
5875 SourceLocation(),
5876 SourceLocation(), nullptr,
5877 FieldTypes[i], /*TInfo=*/nullptr,
5878 /*BitWidth=*/nullptr,
5879 /*Mutable=*/false,
5880 ICIS_NoInit);
5881 Field->setAccess(AS_public);
5882 D->addDecl(Field);
5885 D->completeDefinition();
5886 ObjCFastEnumerationStateType = Context.getTagDeclType(D);
5889 return ObjCFastEnumerationStateType;
5892 llvm::Constant *
5893 CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
5894 assert(!E->getType()->isPointerType() && "Strings are always arrays");
5896 // Don't emit it as the address of the string, emit the string data itself
5897 // as an inline array.
5898 if (E->getCharByteWidth() == 1) {
5899 SmallString<64> Str(E->getString());
5901 // Resize the string to the right size, which is indicated by its type.
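// For example, a two-character literal initializing an eight-element char
// array is padded with trailing zeros so the emitted constant matches the
// size indicated by the array type.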
5902 const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
5903 assert(CAT && "String literal not of constant array type!");
5904 Str.resize(CAT->getSize().getZExtValue());
5905 return llvm::ConstantDataArray::getString(VMContext, Str, false);
5908 auto *AType = cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
5909 llvm::Type *ElemTy = AType->getElementType();
5910 unsigned NumElements = AType->getNumElements();
5912 // Wide strings have either 2-byte or 4-byte elements.
5913 if (ElemTy->getPrimitiveSizeInBits() == 16) {
5914 SmallVector<uint16_t, 32> Elements;
5915 Elements.reserve(NumElements);
5917 for(unsigned i = 0, e = E->getLength(); i != e; ++i)
5918 Elements.push_back(E->getCodeUnit(i));
5919 Elements.resize(NumElements);
5920 return llvm::ConstantDataArray::get(VMContext, Elements);
5923 assert(ElemTy->getPrimitiveSizeInBits() == 32);
5924 SmallVector<uint32_t, 32> Elements;
5925 Elements.reserve(NumElements);
5927 for(unsigned i = 0, e = E->getLength(); i != e; ++i)
5928 Elements.push_back(E->getCodeUnit(i));
5929 Elements.resize(NumElements);
5930 return llvm::ConstantDataArray::get(VMContext, Elements);
5933 static llvm::GlobalVariable *
5934 GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
5935 CodeGenModule &CGM, StringRef GlobalName,
5936 CharUnits Alignment) {
5937 unsigned AddrSpace = CGM.getContext().getTargetAddressSpace(
5938 CGM.GetGlobalConstantAddressSpace());
5940 llvm::Module &M = CGM.getModule();
5941 // Create a global variable for this string
5942 auto *GV = new llvm::GlobalVariable(
5943 M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
5944 nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
5945 GV->setAlignment(Alignment.getAsAlign());
5946 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
5947 if (GV->isWeakForLinker()) {
5948 assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
5949 GV->setComdat(M.getOrInsertComdat(GV->getName()));
5951 CGM.setDSOLocal(GV);
5953 return GV;
5956 /// GetAddrOfConstantStringFromLiteral - Return a pointer to a
5957 /// constant array for the given string literal.
5958 ConstantAddress
5959 CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
5960 StringRef Name) {
5961 CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(S->getType());
5963 llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
5964 llvm::GlobalVariable **Entry = nullptr;
5965 if (!LangOpts.WritableStrings) {
5966 Entry = &ConstantStringMap[C];
5967 if (auto GV = *Entry) {
5968 if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
5969 GV->setAlignment(Alignment.getAsAlign());
5970 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5971 GV->getValueType(), Alignment);
5975 SmallString<256> MangledNameBuffer;
5976 StringRef GlobalVariableName;
5977 llvm::GlobalValue::LinkageTypes LT;
5979 // Mangle the string literal if that's how the ABI merges duplicate strings.
5980 // Don't do it if they are writable, since we don't want writes in one TU to
5981 // affect strings in another.
5982 if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) &&
5983 !LangOpts.WritableStrings) {
5984 llvm::raw_svector_ostream Out(MangledNameBuffer);
5985 getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
5986 LT = llvm::GlobalValue::LinkOnceODRLinkage;
5987 GlobalVariableName = MangledNameBuffer;
5988 } else {
5989 LT = llvm::GlobalValue::PrivateLinkage;
5990 GlobalVariableName = Name;
5993 auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
5995 CGDebugInfo *DI = getModuleDebugInfo();
5996 if (DI && getCodeGenOpts().hasReducedDebugInfo())
5997 DI->AddStringLiteralDebugInfo(GV, S);
5999 if (Entry)
6000 *Entry = GV;
6002 SanitizerMD->reportGlobal(GV, S->getStrTokenLoc(0), "<string literal>");
6004 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
6005 GV->getValueType(), Alignment);
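// Illustrative sketch (editorial, not part of the upstream file): because
// read-only literals are uniqued through ConstantStringMap above, a
// hypothetical caller holding a CodeGenModule `CGM` and a StringLiteral `S`
// gets the same global back for repeated requests unless -fwritable-strings
// is in effect:
//
//   llvm::Constant *A = CGM.GetAddrOfConstantStringFromLiteral(S).getPointer();
//   llvm::Constant *B = CGM.GetAddrOfConstantStringFromLiteral(S).getPointer();
//   assert(A == B && "read-only string literals are uniqued");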
6008 /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
6009 /// array for the given ObjCEncodeExpr node.
6010 ConstantAddress
6011 CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
6012 std::string Str;
6013 getContext().getObjCEncodingForType(E->getEncodedType(), Str);
6015 return GetAddrOfConstantCString(Str);
6018 /// GetAddrOfConstantCString - Returns a pointer to a character array containing
6019 /// the literal and a terminating '\0' character.
6020 /// The result has pointer to array type.
6021 ConstantAddress CodeGenModule::GetAddrOfConstantCString(
6022 const std::string &Str, const char *GlobalName) {
6023 StringRef StrWithNull(Str.c_str(), Str.size() + 1);
6024 CharUnits Alignment =
6025 getContext().getAlignOfGlobalVarInChars(getContext().CharTy);
6027 llvm::Constant *C =
6028 llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
6030 // Don't share any string literals if strings aren't constant.
6031 llvm::GlobalVariable **Entry = nullptr;
6032 if (!LangOpts.WritableStrings) {
6033 Entry = &ConstantStringMap[C];
6034 if (auto GV = *Entry) {
6035 if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
6036 GV->setAlignment(Alignment.getAsAlign());
6037 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
6038 GV->getValueType(), Alignment);
6042 // Get the default prefix if a name wasn't specified.
6043 if (!GlobalName)
6044 GlobalName = ".str";
6045 // Create a global variable for this.
6046 auto GV = GenerateStringLiteral(C, llvm::GlobalValue::PrivateLinkage, *this,
6047 GlobalName, Alignment);
6048 if (Entry)
6049 *Entry = GV;
6051 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
6052 GV->getValueType(), Alignment);
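// Illustrative sketch (editorial, not part of the upstream file): a minimal
// use of GetAddrOfConstantCString. The helper below is hypothetical and only
// demonstrates the call shape, assuming the public CodeGenModule API above.
[[maybe_unused]] static llvm::Constant *
getExampleCStringPointer(CodeGenModule &CGM) {
  // Emits (or reuses) a private global containing "hello\0" with the default
  // ".str" prefix and returns its address.
  ConstantAddress Addr = CGM.GetAddrOfConstantCString("hello");
  return Addr.getPointer();
}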
6055 ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
6056 const MaterializeTemporaryExpr *E, const Expr *Init) {
6057 assert((E->getStorageDuration() == SD_Static ||
6058 E->getStorageDuration() == SD_Thread) && "not a global temporary");
6059 const auto *VD = cast<VarDecl>(E->getExtendingDecl());
6061 // If we're not materializing a subobject of the temporary, keep the
6062 // cv-qualifiers from the type of the MaterializeTemporaryExpr.
6063 QualType MaterializedType = Init->getType();
6064 if (Init == E->getSubExpr())
6065 MaterializedType = E->getType();
6067 CharUnits Align = getContext().getTypeAlignInChars(MaterializedType);
6069 auto InsertResult = MaterializedGlobalTemporaryMap.insert({E, nullptr});
6070 if (!InsertResult.second) {
6071 // We've seen this before: either we already created it or we're in the
6072 // process of doing so.
6073 if (!InsertResult.first->second) {
6074 // We recursively re-entered this function, probably during emission of
6075 // the initializer. Create a placeholder. We'll clean this up in the
6076 // outer call, at the end of this function.
6077 llvm::Type *Type = getTypes().ConvertTypeForMem(MaterializedType);
6078 InsertResult.first->second = new llvm::GlobalVariable(
6079 getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
6080 nullptr);
6082 return ConstantAddress(InsertResult.first->second,
6083 llvm::cast<llvm::GlobalVariable>(
6084 InsertResult.first->second->stripPointerCasts())
6085 ->getValueType(),
6086 Align);
6089 // FIXME: If an externally-visible declaration extends multiple temporaries,
6090 // we need to give each temporary the same name in every translation unit (and
6091 // we also need to make the temporaries externally-visible).
6092 SmallString<256> Name;
6093 llvm::raw_svector_ostream Out(Name);
6094 getCXXABI().getMangleContext().mangleReferenceTemporary(
6095 VD, E->getManglingNumber(), Out);
6097 APValue *Value = nullptr;
6098 if (E->getStorageDuration() == SD_Static && VD && VD->evaluateValue()) {
6099 // If the initializer of the extending declaration is a constant
6100 // initializer, we should have a cached constant initializer for this
6101 // temporary. Note that this might have a different value from the value
6102 // computed by evaluating the initializer if the surrounding constant
6103 // expression modifies the temporary.
6104 Value = E->getOrCreateValue(false);
6107 // Try evaluating it now; it might have a constant initializer.
6108 Expr::EvalResult EvalResult;
6109 if (!Value && Init->EvaluateAsRValue(EvalResult, getContext()) &&
6110 !EvalResult.hasSideEffects())
6111 Value = &EvalResult.Val;
6113 LangAS AddrSpace =
6114 VD ? GetGlobalVarAddressSpace(VD) : MaterializedType.getAddressSpace();
6116 std::optional<ConstantEmitter> emitter;
6117 llvm::Constant *InitialValue = nullptr;
6118 bool Constant = false;
6119 llvm::Type *Type;
6120 if (Value) {
6121 // The temporary has a constant initializer, use it.
6122 emitter.emplace(*this);
6123 InitialValue = emitter->emitForInitializer(*Value, AddrSpace,
6124 MaterializedType);
6125 Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/ Value,
6126 /*ExcludeDtor*/ false);
6127 Type = InitialValue->getType();
6128 } else {
6129 // No initializer, the initialization will be provided when we
6130 // initialize the declaration which performed lifetime extension.
6131 Type = getTypes().ConvertTypeForMem(MaterializedType);
6134 // Create a global variable for this lifetime-extended temporary.
6135 llvm::GlobalValue::LinkageTypes Linkage =
6136 getLLVMLinkageVarDefinition(VD, Constant);
6137 if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
6138 const VarDecl *InitVD;
6139 if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&
6140 isa<CXXRecordDecl>(InitVD->getLexicalDeclContext())) {
6141 // Temporaries defined inside a class get linkonce_odr linkage because the
6142 // class can be defined in multiple translation units.
6143 Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
6144 } else {
6145 // There is no need for this temporary to have external linkage if the
6146 // VarDecl has external linkage.
6147 Linkage = llvm::GlobalVariable::InternalLinkage;
6150 auto TargetAS = getContext().getTargetAddressSpace(AddrSpace);
6151 auto *GV = new llvm::GlobalVariable(
6152 getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
6153 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
6154 if (emitter) emitter->finalize(GV);
6155 // Don't assign dllimport or dllexport to local linkage globals.
6156 if (!llvm::GlobalValue::isLocalLinkage(Linkage)) {
6157 setGVProperties(GV, VD);
6158 if (GV->getDLLStorageClass() == llvm::GlobalVariable::DLLExportStorageClass)
6159 // The reference temporary should never be dllexport.
6160 GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
6162 GV->setAlignment(Align.getAsAlign());
6163 if (supportsCOMDAT() && GV->isWeakForLinker())
6164 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
6165 if (VD->getTLSKind())
6166 setTLSMode(GV, *VD);
6167 llvm::Constant *CV = GV;
6168 if (AddrSpace != LangAS::Default)
6169 CV = getTargetCodeGenInfo().performAddrSpaceCast(
6170 *this, GV, AddrSpace, LangAS::Default,
6171 Type->getPointerTo(
6172 getContext().getTargetAddressSpace(LangAS::Default)));
6174 // Update the map with the new temporary. If we created a placeholder above,
6175 // replace it with the new global now.
6176 llvm::Constant *&Entry = MaterializedGlobalTemporaryMap[E];
6177 if (Entry) {
6178 Entry->replaceAllUsesWith(
6179 llvm::ConstantExpr::getBitCast(CV, Entry->getType()));
6180 llvm::cast<llvm::GlobalVariable>(Entry)->eraseFromParent();
6182 Entry = CV;
6184 return ConstantAddress(CV, Type, Align);
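// Illustrative note (editorial): the source pattern that reaches
// GetAddrOfGlobalTemporary is a reference with static or thread storage
// duration bound to a temporary, e.g.
//
//   const int &GlobalRef = 42;            // SD_Static lifetime extension
//   thread_local const int &TLRef = 7;    // SD_Thread lifetime extension
//
// Each extended temporary gets its own mangled global here, possibly via the
// placeholder path above when emission re-enters this function.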
6187 /// EmitObjCPropertyImplementations - Emit information for synthesized
6188 /// properties for an implementation.
6189 void CodeGenModule::EmitObjCPropertyImplementations(const
6190 ObjCImplementationDecl *D) {
6191 for (const auto *PID : D->property_impls()) {
6192 // Dynamic is just for type-checking.
6193 if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
6194 ObjCPropertyDecl *PD = PID->getPropertyDecl();
6196 // Determine which methods need to be implemented; some may have
6197 // been overridden. Note that ::isPropertyAccessor is not the method
6198 // we want: that only indicates whether the decl came from a
6199 // property. What we want to know is whether the method is defined in
6200 // this implementation.
6201 auto *Getter = PID->getGetterMethodDecl();
6202 if (!Getter || Getter->isSynthesizedAccessorStub())
6203 CodeGenFunction(*this).GenerateObjCGetter(
6204 const_cast<ObjCImplementationDecl *>(D), PID);
6205 auto *Setter = PID->getSetterMethodDecl();
6206 if (!PD->isReadOnly() && (!Setter || Setter->isSynthesizedAccessorStub()))
6207 CodeGenFunction(*this).GenerateObjCSetter(
6208 const_cast<ObjCImplementationDecl *>(D), PID);
6213 static bool needsDestructMethod(ObjCImplementationDecl *impl) {
6214 const ObjCInterfaceDecl *iface = impl->getClassInterface();
6215 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
6216 ivar; ivar = ivar->getNextIvar())
6217 if (ivar->getType().isDestructedType())
6218 return true;
6220 return false;
6223 static bool AllTrivialInitializers(CodeGenModule &CGM,
6224 ObjCImplementationDecl *D) {
6225 CodeGenFunction CGF(CGM);
6226 for (ObjCImplementationDecl::init_iterator B = D->init_begin(),
6227 E = D->init_end(); B != E; ++B) {
6228 CXXCtorInitializer *CtorInitExp = *B;
6229 Expr *Init = CtorInitExp->getInit();
6230 if (!CGF.isTrivialInitializer(Init))
6231 return false;
6233 return true;
6236 /// EmitObjCIvarInitializations - Emit information for ivar initialization
6237 /// for an implementation.
6238 void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
6239 // We might need a .cxx_destruct even if we don't have any ivar initializers.
6240 if (needsDestructMethod(D)) {
6241 IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
6242 Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
6243 ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(
6244 getContext(), D->getLocation(), D->getLocation(), cxxSelector,
6245 getContext().VoidTy, nullptr, D,
6246 /*isInstance=*/true, /*isVariadic=*/false,
6247 /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
6248 /*isImplicitlyDeclared=*/true,
6249 /*isDefined=*/false, ObjCMethodDecl::Required);
6250 D->addInstanceMethod(DTORMethod);
6251 CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
6252 D->setHasDestructors(true);
6255 // If the implementation doesn't have any ivar initializers, we don't need
6256 // a .cxx_construct.
6257 if (D->getNumIvarInitializers() == 0 ||
6258 AllTrivialInitializers(*this, D))
6259 return;
6261 IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
6262 Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
6263 // The constructor returns 'self'.
6264 ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(
6265 getContext(), D->getLocation(), D->getLocation(), cxxSelector,
6266 getContext().getObjCIdType(), nullptr, D, /*isInstance=*/true,
6267 /*isVariadic=*/false,
6268 /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
6269 /*isImplicitlyDeclared=*/true,
6270 /*isDefined=*/false, ObjCMethodDecl::Required);
6271 D->addInstanceMethod(CTORMethod);
6272 CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
6273 D->setHasNonZeroConstructors(true);
6276 // EmitLinkageSpec - Emit all declarations in a linkage spec.
6277 void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
6278 if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
6279 LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
6280 ErrorUnsupported(LSD, "linkage spec");
6281 return;
6284 EmitDeclContext(LSD);
6287 void CodeGenModule::EmitTopLevelStmt(const TopLevelStmtDecl *D) {
6288 // Device code should not be at top level.
6289 if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
6290 return;
6292 std::unique_ptr<CodeGenFunction> &CurCGF =
6293 GlobalTopLevelStmtBlockInFlight.first;
6295 // We emitted a top-level stmt, but an initializer was emitted after it.
6296 // Stop squashing the top-level stmts into a single function.
6297 if (CurCGF && CXXGlobalInits.back() != CurCGF->CurFn) {
6298 CurCGF->FinishFunction(D->getEndLoc());
6299 CurCGF = nullptr;
6302 if (!CurCGF) {
6303 // void __stmts__N(void)
6304 // FIXME: Ask the ABI name mangler to pick a name.
6305 std::string Name = "__stmts__" + llvm::utostr(CXXGlobalInits.size());
6306 FunctionArgList Args;
6307 QualType RetTy = getContext().VoidTy;
6308 const CGFunctionInfo &FnInfo =
6309 getTypes().arrangeBuiltinFunctionDeclaration(RetTy, Args);
6310 llvm::FunctionType *FnTy = getTypes().GetFunctionType(FnInfo);
6311 llvm::Function *Fn = llvm::Function::Create(
6312 FnTy, llvm::GlobalValue::InternalLinkage, Name, &getModule());
6314 CurCGF.reset(new CodeGenFunction(*this));
6315 GlobalTopLevelStmtBlockInFlight.second = D;
6316 CurCGF->StartFunction(GlobalDecl(), RetTy, Fn, FnInfo, Args,
6317 D->getBeginLoc(), D->getBeginLoc());
6318 CXXGlobalInits.push_back(Fn);
6321 CurCGF->EmitStmt(D->getStmt());
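// Illustrative note (editorial): consecutive top-level statements (e.g. from
// clang's incremental extensions) are squashed into one synthetic internal
// function, roughly
//
//   define internal void @__stmts__0() { ... }
//
// which is appended to CXXGlobalInits so it runs alongside the other global
// initializers; an intervening dynamic initializer ends the current group.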
6324 void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
6325 for (auto *I : DC->decls()) {
6326 // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
6327 // are themselves considered "top-level", so EmitTopLevelDecl on an
6328 // ObjCImplDecl does not recursively visit them. We need to do that in
6329 // case they're nested inside another construct (LinkageSpecDecl /
6330 // ExportDecl) that does stop them from being considered "top-level".
6331 if (auto *OID = dyn_cast<ObjCImplDecl>(I)) {
6332 for (auto *M : OID->methods())
6333 EmitTopLevelDecl(M);
6336 EmitTopLevelDecl(I);
6340 /// EmitTopLevelDecl - Emit code for a single top level declaration.
6341 void CodeGenModule::EmitTopLevelDecl(Decl *D) {
6342 // Ignore dependent declarations.
6343 if (D->isTemplated())
6344 return;
6346 // Consteval functions shouldn't be emitted.
6347 if (auto *FD = dyn_cast<FunctionDecl>(D); FD && FD->isImmediateFunction())
6348 return;
6350 switch (D->getKind()) {
6351 case Decl::CXXConversion:
6352 case Decl::CXXMethod:
6353 case Decl::Function:
6354 EmitGlobal(cast<FunctionDecl>(D));
6355 // Always provide some coverage mapping
6356 // even for the functions that aren't emitted.
6357 AddDeferredUnusedCoverageMapping(D);
6358 break;
6360 case Decl::CXXDeductionGuide:
6361 // Function-like, but does not result in code emission.
6362 break;
6364 case Decl::Var:
6365 case Decl::Decomposition:
6366 case Decl::VarTemplateSpecialization:
6367 EmitGlobal(cast<VarDecl>(D));
6368 if (auto *DD = dyn_cast<DecompositionDecl>(D))
6369 for (auto *B : DD->bindings())
6370 if (auto *HD = B->getHoldingVar())
6371 EmitGlobal(HD);
6372 break;
6374 // Indirect fields from global anonymous structs and unions can be
6375 // ignored; only the actual variable requires IR gen support.
6376 case Decl::IndirectField:
6377 break;
6379 // C++ Decls
6380 case Decl::Namespace:
6381 EmitDeclContext(cast<NamespaceDecl>(D));
6382 break;
6383 case Decl::ClassTemplateSpecialization: {
6384 const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
6385 if (CGDebugInfo *DI = getModuleDebugInfo())
6386 if (Spec->getSpecializationKind() ==
6387 TSK_ExplicitInstantiationDefinition &&
6388 Spec->hasDefinition())
6389 DI->completeTemplateDefinition(*Spec);
6390 } [[fallthrough]];
6391 case Decl::CXXRecord: {
6392 CXXRecordDecl *CRD = cast<CXXRecordDecl>(D);
6393 if (CGDebugInfo *DI = getModuleDebugInfo()) {
6394 if (CRD->hasDefinition())
6395 DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
6396 if (auto *ES = D->getASTContext().getExternalSource())
6397 if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
6398 DI->completeUnusedClass(*CRD);
6400 // Emit any static data members, they may be definitions.
6401 for (auto *I : CRD->decls())
6402 if (isa<VarDecl>(I) || isa<CXXRecordDecl>(I))
6403 EmitTopLevelDecl(I);
6404 break;
6406 // No code generation needed.
6407 case Decl::UsingShadow:
6408 case Decl::ClassTemplate:
6409 case Decl::VarTemplate:
6410 case Decl::Concept:
6411 case Decl::VarTemplatePartialSpecialization:
6412 case Decl::FunctionTemplate:
6413 case Decl::TypeAliasTemplate:
6414 case Decl::Block:
6415 case Decl::Empty:
6416 case Decl::Binding:
6417 break;
6418 case Decl::Using: // using X; [C++]
6419 if (CGDebugInfo *DI = getModuleDebugInfo())
6420 DI->EmitUsingDecl(cast<UsingDecl>(*D));
6421 break;
6422 case Decl::UsingEnum: // using enum X; [C++]
6423 if (CGDebugInfo *DI = getModuleDebugInfo())
6424 DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(*D));
6425 break;
6426 case Decl::NamespaceAlias:
6427 if (CGDebugInfo *DI = getModuleDebugInfo())
6428 DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
6429 break;
6430 case Decl::UsingDirective: // using namespace X; [C++]
6431 if (CGDebugInfo *DI = getModuleDebugInfo())
6432 DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
6433 break;
6434 case Decl::CXXConstructor:
6435 getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
6436 break;
6437 case Decl::CXXDestructor:
6438 getCXXABI().EmitCXXDestructors(cast<CXXDestructorDecl>(D));
6439 break;
6441 case Decl::StaticAssert:
6442 // Nothing to do.
6443 break;
6445 // Objective-C Decls
6447 // Forward declarations, no (immediate) code generation.
6448 case Decl::ObjCInterface:
6449 case Decl::ObjCCategory:
6450 break;
6452 case Decl::ObjCProtocol: {
6453 auto *Proto = cast<ObjCProtocolDecl>(D);
6454 if (Proto->isThisDeclarationADefinition())
6455 ObjCRuntime->GenerateProtocol(Proto);
6456 break;
6459 case Decl::ObjCCategoryImpl:
6460 // Categories have properties but don't support @synthesize, so we
6461 // can ignore them here.
6462 ObjCRuntime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
6463 break;
6465 case Decl::ObjCImplementation: {
6466 auto *OMD = cast<ObjCImplementationDecl>(D);
6467 EmitObjCPropertyImplementations(OMD);
6468 EmitObjCIvarInitializations(OMD);
6469 ObjCRuntime->GenerateClass(OMD);
6470 // Emit global variable debug information.
6471 if (CGDebugInfo *DI = getModuleDebugInfo())
6472 if (getCodeGenOpts().hasReducedDebugInfo())
6473 DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType(
6474 OMD->getClassInterface()), OMD->getLocation());
6475 break;
6477 case Decl::ObjCMethod: {
6478 auto *OMD = cast<ObjCMethodDecl>(D);
6479 // If this is not a prototype, emit the body.
6480 if (OMD->getBody())
6481 CodeGenFunction(*this).GenerateObjCMethod(OMD);
6482 break;
6484 case Decl::ObjCCompatibleAlias:
6485 ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
6486 break;
6488 case Decl::PragmaComment: {
6489 const auto *PCD = cast<PragmaCommentDecl>(D);
6490 switch (PCD->getCommentKind()) {
6491 case PCK_Unknown:
6492 llvm_unreachable("unexpected pragma comment kind");
6493 case PCK_Linker:
6494 AppendLinkerOptions(PCD->getArg());
6495 break;
6496 case PCK_Lib:
6497 AddDependentLib(PCD->getArg());
6498 break;
6499 case PCK_Compiler:
6500 case PCK_ExeStr:
6501 case PCK_User:
6502 break; // We ignore all of these.
6504 break;
6507 case Decl::PragmaDetectMismatch: {
6508 const auto *PDMD = cast<PragmaDetectMismatchDecl>(D);
6509 AddDetectMismatch(PDMD->getName(), PDMD->getValue());
6510 break;
6513 case Decl::LinkageSpec:
6514 EmitLinkageSpec(cast<LinkageSpecDecl>(D));
6515 break;
6517 case Decl::FileScopeAsm: {
6518 // File-scope asm is ignored during device-side CUDA compilation.
6519 if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
6520 break;
6521 // File-scope asm is ignored during device-side OpenMP compilation.
6522 if (LangOpts.OpenMPIsDevice)
6523 break;
6524 // File-scope asm is ignored during device-side SYCL compilation.
6525 if (LangOpts.SYCLIsDevice)
6526 break;
6527 auto *AD = cast<FileScopeAsmDecl>(D);
6528 getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
6529 break;
6532 case Decl::TopLevelStmt:
6533 EmitTopLevelStmt(cast<TopLevelStmtDecl>(D));
6534 break;
6536 case Decl::Import: {
6537 auto *Import = cast<ImportDecl>(D);
6539 // If we've already imported this module, we're done.
6540 if (!ImportedModules.insert(Import->getImportedModule()))
6541 break;
6543 // Emit debug information for direct imports.
6544 if (!Import->getImportedOwningModule()) {
6545 if (CGDebugInfo *DI = getModuleDebugInfo())
6546 DI->EmitImportDecl(*Import);
6549 // For C++ standard modules we are done - we will call the module
6550 // initializer for imported modules, and that will likewise call those for
6551 // any imports it has.
6552 if (CXX20ModuleInits && Import->getImportedOwningModule() &&
6553 !Import->getImportedOwningModule()->isModuleMapModule())
6554 break;
6556 // For clang C++ module map modules the initializers for sub-modules are
6557 // emitted here.
6559 // Find all of the submodules and emit the module initializers.
6560 llvm::SmallPtrSet<clang::Module *, 16> Visited;
6561 SmallVector<clang::Module *, 16> Stack;
6562 Visited.insert(Import->getImportedModule());
6563 Stack.push_back(Import->getImportedModule());
6565 while (!Stack.empty()) {
6566 clang::Module *Mod = Stack.pop_back_val();
6567 if (!EmittedModuleInitializers.insert(Mod).second)
6568 continue;
6570 for (auto *D : Context.getModuleInitializers(Mod))
6571 EmitTopLevelDecl(D);
6573 // Visit the submodules of this module.
6574 for (auto *Submodule : Mod->submodules()) {
6575 // Skip explicit children; they need to be explicitly imported to emit
6576 // the initializers.
6577 if (Submodule->IsExplicit)
6578 continue;
6580 if (Visited.insert(Submodule).second)
6581 Stack.push_back(Submodule);
6584 break;
6587 case Decl::Export:
6588 EmitDeclContext(cast<ExportDecl>(D));
6589 break;
6591 case Decl::OMPThreadPrivate:
6592 EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
6593 break;
6595 case Decl::OMPAllocate:
6596 EmitOMPAllocateDecl(cast<OMPAllocateDecl>(D));
6597 break;
6599 case Decl::OMPDeclareReduction:
6600 EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(D));
6601 break;
6603 case Decl::OMPDeclareMapper:
6604 EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(D));
6605 break;
6607 case Decl::OMPRequires:
6608 EmitOMPRequiresDecl(cast<OMPRequiresDecl>(D));
6609 break;
6611 case Decl::Typedef:
6612 case Decl::TypeAlias: // using foo = bar; [C++11]
6613 if (CGDebugInfo *DI = getModuleDebugInfo())
6614 DI->EmitAndRetainType(
6615 getContext().getTypedefType(cast<TypedefNameDecl>(D)));
6616 break;
6618 case Decl::Record:
6619 if (CGDebugInfo *DI = getModuleDebugInfo())
6620 if (cast<RecordDecl>(D)->getDefinition())
6621 DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
6622 break;
6624 case Decl::Enum:
6625 if (CGDebugInfo *DI = getModuleDebugInfo())
6626 if (cast<EnumDecl>(D)->getDefinition())
6627 DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(D)));
6628 break;
6630 case Decl::HLSLBuffer:
6631 getHLSLRuntime().addBuffer(cast<HLSLBufferDecl>(D));
6632 break;
6634 default:
6635 // Make sure we handled everything we should; every other kind is a
6636 // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
6637 // function. Need to recode Decl::Kind to do that easily.
6638 assert(isa<TypeDecl>(D) && "Unsupported decl kind");
6639 break;
6643 void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
6644 // Do we need to generate coverage mapping?
6645 if (!CodeGenOpts.CoverageMapping)
6646 return;
6647 switch (D->getKind()) {
6648 case Decl::CXXConversion:
6649 case Decl::CXXMethod:
6650 case Decl::Function:
6651 case Decl::ObjCMethod:
6652 case Decl::CXXConstructor:
6653 case Decl::CXXDestructor: {
6654 if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
6655 break;
6656 SourceManager &SM = getContext().getSourceManager();
6657 if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
6658 break;
6659 auto I = DeferredEmptyCoverageMappingDecls.find(D);
6660 if (I == DeferredEmptyCoverageMappingDecls.end())
6661 DeferredEmptyCoverageMappingDecls[D] = true;
6662 break;
6664 default:
6665 break;
6669 void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
6670 // Do we need to generate coverage mapping?
6671 if (!CodeGenOpts.CoverageMapping)
6672 return;
6673 if (const auto *Fn = dyn_cast<FunctionDecl>(D)) {
6674 if (Fn->isTemplateInstantiation())
6675 ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern());
6677 auto I = DeferredEmptyCoverageMappingDecls.find(D);
6678 if (I == DeferredEmptyCoverageMappingDecls.end())
6679 DeferredEmptyCoverageMappingDecls[D] = false;
6680 else
6681 I->second = false;
6684 void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
6685 // We call takeVector() here to avoid use-after-free.
6686 // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because
6687 // we deserialize function bodies to emit coverage info for them, and that
6688 // deserializes more declarations. How should we handle that case?
6689 for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) {
6690 if (!Entry.second)
6691 continue;
6692 const Decl *D = Entry.first;
6693 switch (D->getKind()) {
6694 case Decl::CXXConversion:
6695 case Decl::CXXMethod:
6696 case Decl::Function:
6697 case Decl::ObjCMethod: {
6698 CodeGenPGO PGO(*this);
6699 GlobalDecl GD(cast<FunctionDecl>(D));
6700 PGO.emitEmptyCounterMapping(D, getMangledName(GD),
6701 getFunctionLinkage(GD));
6702 break;
6704 case Decl::CXXConstructor: {
6705 CodeGenPGO PGO(*this);
6706 GlobalDecl GD(cast<CXXConstructorDecl>(D), Ctor_Base);
6707 PGO.emitEmptyCounterMapping(D, getMangledName(GD),
6708 getFunctionLinkage(GD));
6709 break;
6711 case Decl::CXXDestructor: {
6712 CodeGenPGO PGO(*this);
6713 GlobalDecl GD(cast<CXXDestructorDecl>(D), Dtor_Base);
6714 PGO.emitEmptyCounterMapping(D, getMangledName(GD),
6715 getFunctionLinkage(GD));
6716 break;
6718 default:
6719 break;
6724 void CodeGenModule::EmitMainVoidAlias() {
6725 // In order to transition away from "__original_main" gracefully, emit an
6726 // alias for "main" in the no-argument case so that libc can detect when
6727 // new-style no-argument main is in use.
6728 if (llvm::Function *F = getModule().getFunction("main")) {
6729 if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
6730 F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth())) {
6731 auto *GA = llvm::GlobalAlias::create("__main_void", F);
6732 GA->setVisibility(llvm::GlobalValue::HiddenVisibility);
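// Illustrative note (editorial): for a zero-argument `int main(void)` this
// produces an alias roughly of the form
//
//   @__main_void = hidden alias i32 (), ptr @main
//
// so a libc looking for __main_void can detect the no-argument entry point.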
6737 /// Turns the given pointer into a constant.
6738 static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
6739 const void *Ptr) {
6740 uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
6741 llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
6742 return llvm::ConstantInt::get(i64, PtrInt);
6745 static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
6746 llvm::NamedMDNode *&GlobalMetadata,
6747 GlobalDecl D,
6748 llvm::GlobalValue *Addr) {
6749 if (!GlobalMetadata)
6750 GlobalMetadata =
6751 CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
6753 // TODO: should we report variant information for ctors/dtors?
6754 llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(Addr),
6755 llvm::ConstantAsMetadata::get(GetPointerConstant(
6756 CGM.getLLVMContext(), D.getDecl()))};
6757 GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
6760 bool CodeGenModule::CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem,
6761 llvm::GlobalValue *CppFunc) {
6762 // Store the list of ifuncs we need to replace uses in.
6763 llvm::SmallVector<llvm::GlobalIFunc *> IFuncs;
6764 // List of ConstantExprs that we should be able to delete when we're done
6765 // here.
6766 llvm::SmallVector<llvm::ConstantExpr *> CEs;
6768 // It isn't valid to do the replacement if the value we found is CppFunc itself.
6769 if (Elem == CppFunc)
6770 return false;
6772 // First make sure that all users of this are ifuncs (or ifuncs via a
6773 // bitcast), and collect the list of ifuncs and CEs so we can work on them
6774 // later.
6775 for (llvm::User *User : Elem->users()) {
6776 // Users can either be a bitcast ConstExpr that is used by the ifuncs, OR an
6777 // ifunc directly. In any other case, just give up, as we don't know what we
6778 // could break by changing those.
6779 if (auto *ConstExpr = dyn_cast<llvm::ConstantExpr>(User)) {
6780 if (ConstExpr->getOpcode() != llvm::Instruction::BitCast)
6781 return false;
6783 for (llvm::User *CEUser : ConstExpr->users()) {
6784 if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(CEUser)) {
6785 IFuncs.push_back(IFunc);
6786 } else {
6787 return false;
6790 CEs.push_back(ConstExpr);
6791 } else if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(User)) {
6792 IFuncs.push_back(IFunc);
6793 } else {
6794 // This user is one we don't know how to handle, so fail redirection. This
6795 // will result in an ifunc retaining a resolver name that will ultimately
6796 // fail to be resolved to a defined function.
6797 return false;
6801 // Now that we know this is a valid case for the alias replacement, we
6802 // need to remove all of the references to Elem (and the bitcasts!) so we can
6803 // delete it.
6804 for (llvm::GlobalIFunc *IFunc : IFuncs)
6805 IFunc->setResolver(nullptr);
6806 for (llvm::ConstantExpr *ConstExpr : CEs)
6807 ConstExpr->destroyConstant();
6809 // We should now be out of uses for the 'old' version of this function, so we
6810 // can erase it as well.
6811 Elem->eraseFromParent();
6813 for (llvm::GlobalIFunc *IFunc : IFuncs) {
6814 // The type of the resolver is always just a function-type that returns the
6815 // type of the IFunc, so create that here. If the type of the actual
6816 // resolver doesn't match, it just gets bitcast to the right thing.
6817 auto *ResolverTy =
6818 llvm::FunctionType::get(IFunc->getType(), /*isVarArg*/ false);
6819 llvm::Constant *Resolver = GetOrCreateLLVMFunction(
6820 CppFunc->getName(), ResolverTy, {}, /*ForVTable*/ false);
6821 IFunc->setResolver(Resolver);
6823 return true;
6826 /// For each function which is declared within an extern "C" region and marked
6827 /// as 'used', but has internal linkage, create an alias from the unmangled
6828 /// name to the mangled name if possible. People expect to be able to refer
6829 /// to such functions with an unmangled name from inline assembly within the
6830 /// same translation unit.
6831 void CodeGenModule::EmitStaticExternCAliases() {
6832 if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases())
6833 return;
6834 for (auto &I : StaticExternCValues) {
6835 IdentifierInfo *Name = I.first;
6836 llvm::GlobalValue *Val = I.second;
6838 // If Val is null, that implies there were multiple declarations that each
6839 // had a claim to the unmangled name. In this case, generation of the alias
6840 // is suppressed. See CodeGenModule::MaybeHandleStaticInExternC.
6841 if (!Val)
6842 break;
6844 llvm::GlobalValue *ExistingElem =
6845 getModule().getNamedValue(Name->getName());
6847 // If nothing already exists by this name, or we were able to replace all
6848 // of its uses from IFuncs, create the alias.
6849 if (!ExistingElem || CheckAndReplaceExternCIFuncs(ExistingElem, Val))
6850 addCompilerUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
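// Illustrative note (editorial): the pattern served here looks roughly like
//
//   extern "C" {
//     __attribute__((used)) static int helper(void) { return 0; }
//   }
//   // inline assembly elsewhere in the TU refers to plain "helper"
//
// (the name `helper` is just an example). The static function is emitted
// under its internal mangled name, and the alias created above re-exposes the
// unmangled name for such uses.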
6854 bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
6855 GlobalDecl &Result) const {
6856 auto Res = Manglings.find(MangledName);
6857 if (Res == Manglings.end())
6858 return false;
6859 Result = Res->getValue();
6860 return true;
6863 /// Emits metadata nodes associating all the global values in the
6864 /// current module with the Decls they came from. This is useful for
6865 /// projects using IR gen as a subroutine.
6867 /// Since there's currently no way to associate an MDNode directly
6868 /// with an llvm::GlobalValue, we create a global named metadata
6869 /// with the name 'clang.global.decl.ptrs'.
6870 void CodeGenModule::EmitDeclMetadata() {
6871 llvm::NamedMDNode *GlobalMetadata = nullptr;
6873 for (auto &I : MangledDeclNames) {
6874 llvm::GlobalValue *Addr = getModule().getNamedValue(I.second);
6875 // Some mangled names don't necessarily have an associated GlobalValue
6876 // in this module, e.g. if we mangled it for DebugInfo.
6877 if (Addr)
6878 EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr);
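// Illustrative note (editorial): the named metadata emitted here has the
// rough shape
//
//   !clang.global.decl.ptrs = !{!0, !1, ...}
//   !0 = !{ptr @some_global, i64 <Decl* encoded as an i64>}
//
// letting an embedder map each GlobalValue back to the clang Decl it came from.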
6882 /// Emits metadata nodes for all the local variables in the current
6883 /// function.
6884 void CodeGenFunction::EmitDeclMetadata() {
6885 if (LocalDeclMap.empty()) return;
6887 llvm::LLVMContext &Context = getLLVMContext();
6889 // Find the unique metadata ID for this name.
6890 unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
6892 llvm::NamedMDNode *GlobalMetadata = nullptr;
6894 for (auto &I : LocalDeclMap) {
6895 const Decl *D = I.first;
6896 llvm::Value *Addr = I.second.getPointer();
6897 if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
6898 llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
6899 Alloca->setMetadata(
6900 DeclPtrKind, llvm::MDNode::get(
6901 Context, llvm::ValueAsMetadata::getConstant(DAddr)));
6902 } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
6903 GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
6904 EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
6909 void CodeGenModule::EmitVersionIdentMetadata() {
6910 llvm::NamedMDNode *IdentMetadata =
6911 TheModule.getOrInsertNamedMetadata("llvm.ident");
6912 std::string Version = getClangFullVersion();
6913 llvm::LLVMContext &Ctx = TheModule.getContext();
6915 llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)};
6916 IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
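// Illustrative note (editorial): the resulting module metadata looks roughly
// like
//
//   !llvm.ident = !{!0}
//   !0 = !{!"clang version <full version string>"}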
6919 void CodeGenModule::EmitCommandLineMetadata() {
6920 llvm::NamedMDNode *CommandLineMetadata =
6921 TheModule.getOrInsertNamedMetadata("llvm.commandline");
6922 std::string CommandLine = getCodeGenOpts().RecordCommandLine;
6923 llvm::LLVMContext &Ctx = TheModule.getContext();
6925 llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Ctx, CommandLine)};
6926 CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
6929 void CodeGenModule::EmitCoverageFile() {
6930 llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu");
6931 if (!CUNode)
6932 return;
6934 llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
6935 llvm::LLVMContext &Ctx = TheModule.getContext();
6936 auto *CoverageDataFile =
6937 llvm::MDString::get(Ctx, getCodeGenOpts().CoverageDataFile);
6938 auto *CoverageNotesFile =
6939 llvm::MDString::get(Ctx, getCodeGenOpts().CoverageNotesFile);
6940 for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
6941 llvm::MDNode *CU = CUNode->getOperand(i);
6942 llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU};
6943 GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
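// Illustrative note (editorial): for each debug-info compile unit this adds a
// triple of the rough form
//
//   !llvm.gcov = !{!N}
//   !N = !{!"<coverage notes file>", !"<coverage data file>", !CU}
//
// where the two strings are getCodeGenOpts().CoverageNotesFile and
// getCodeGenOpts().CoverageDataFile from above.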
6947 llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
6948 bool ForEH) {
6949 // Return a bogus pointer if RTTI is disabled, unless it's for EH.
6950 // FIXME: should we even be calling this method if RTTI is disabled
6951 // and it's not for EH?
6952 if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice ||
6953 (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
6954 getTriple().isNVPTX()))
6955 return llvm::Constant::getNullValue(Int8PtrTy);
6957 if (ForEH && Ty->isObjCObjectPointerType() &&
6958 LangOpts.ObjCRuntime.isGNUFamily())
6959 return ObjCRuntime->GetEHType(Ty);
6961 return getCXXABI().getAddrOfRTTIDescriptor(Ty);
6964 void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
6965 // Do not emit threadprivates in simd-only mode.
6966 if (LangOpts.OpenMP && LangOpts.OpenMPSimd)
6967 return;
6968 for (auto RefExpr : D->varlists()) {
6969 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
6970 bool PerformInit =
6971 VD->getAnyInitializer() &&
6972 !VD->getAnyInitializer()->isConstantInitializer(getContext(),
6973 /*ForRef=*/false);
6975 Address Addr(GetAddrOfGlobalVar(VD),
6976 getTypes().ConvertTypeForMem(VD->getType()),
6977 getContext().getDeclAlign(VD));
6978 if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
6979 VD, Addr, RefExpr->getBeginLoc(), PerformInit))
6980 CXXGlobalInits.push_back(InitFunction);
6984 llvm::Metadata *
6985 CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
6986 StringRef Suffix) {
6987 if (auto *FnType = T->getAs<FunctionProtoType>())
6988 T = getContext().getFunctionType(
6989 FnType->getReturnType(), FnType->getParamTypes(),
6990 FnType->getExtProtoInfo().withExceptionSpec(EST_None));
6992 llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
6993 if (InternalId)
6994 return InternalId;
6996 if (isExternallyVisible(T->getLinkage())) {
6997 std::string OutName;
6998 llvm::raw_string_ostream Out(OutName);
6999 getCXXABI().getMangleContext().mangleTypeName(
7000 T, Out, getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);
7002 if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
7003 Out << ".normalized";
7005 Out << Suffix;
7007 InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
7008 } else {
7009 InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
7010 llvm::ArrayRef<llvm::Metadata *>());
7013 return InternalId;
7016 llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
7017 return CreateMetadataIdentifierImpl(T, MetadataIdMap, "");
7020 llvm::Metadata *
7021 CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
7022 return CreateMetadataIdentifierImpl(T, VirtualMetadataIdMap, ".virtual");
7025 // Generalize pointer types to a void pointer with the qualifiers of the
7026 // originally pointed-to type, e.g. 'const char *' and 'char * const *'
7027 // generalize to 'const void *' while 'char *' and 'const char **' generalize to
7028 // 'void *'.
7029 static QualType GeneralizeType(ASTContext &Ctx, QualType Ty) {
7030 if (!Ty->isPointerType())
7031 return Ty;
7033 return Ctx.getPointerType(
7034 QualType(Ctx.VoidTy).withCVRQualifiers(
7035 Ty->getPointeeType().getCVRQualifiers()));
7038 // Apply type generalization to a FunctionType's return and argument types
7039 static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) {
7040 if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
7041 SmallVector<QualType, 8> GeneralizedParams;
7042 for (auto &Param : FnType->param_types())
7043 GeneralizedParams.push_back(GeneralizeType(Ctx, Param));
7045 return Ctx.getFunctionType(
7046 GeneralizeType(Ctx, FnType->getReturnType()),
7047 GeneralizedParams, FnType->getExtProtoInfo());
7050 if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
7051 return Ctx.getFunctionNoProtoType(
7052 GeneralizeType(Ctx, FnType->getReturnType()));
7054 llvm_unreachable("Encountered unknown FunctionType");
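// Illustrative worked example (editorial): under GeneralizeType above, the
// prototype
//
//   int fn(const char *, char **, float);
//
// generalizes to
//
//   int fn(const void *, void *, float);
//
// Only the pointer-ness and the pointee's CVR qualifiers survive; non-pointer
// parameters and return types are left untouched.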
7057 llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
7058 return CreateMetadataIdentifierImpl(GeneralizeFunctionType(getContext(), T),
7059 GeneralizedMetadataIdMap, ".generalized");
7062 /// Returns whether this module needs the "all-vtables" type identifier.
7063 bool CodeGenModule::NeedAllVtablesTypeId() const {
7064 // Returns true if at least one of the vtable-based CFI checkers is enabled
7065 // and is not in trapping mode.
7066 return ((LangOpts.Sanitize.has(SanitizerKind::CFIVCall) &&
7067 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIVCall)) ||
7068 (LangOpts.Sanitize.has(SanitizerKind::CFINVCall) &&
7069 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFINVCall)) ||
7070 (LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) &&
7071 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIDerivedCast)) ||
7072 (LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast) &&
7073 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIUnrelatedCast)));
7076 void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
7077 CharUnits Offset,
7078 const CXXRecordDecl *RD) {
7079 llvm::Metadata *MD =
7080 CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
7081 VTable->addTypeMetadata(Offset.getQuantity(), MD);
7083 if (CodeGenOpts.SanitizeCfiCrossDso)
7084 if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
7085 VTable->addTypeMetadata(Offset.getQuantity(),
7086 llvm::ConstantAsMetadata::get(CrossDsoTypeId));
7088 if (NeedAllVtablesTypeId()) {
7089 llvm::Metadata *MD = llvm::MDString::get(getLLVMContext(), "all-vtables");
7090 VTable->addTypeMetadata(Offset.getQuantity(), MD);
7094 llvm::SanitizerStatReport &CodeGenModule::getSanStats() {
7095 if (!SanStats)
7096 SanStats = std::make_unique<llvm::SanitizerStatReport>(&getModule());
7098 return *SanStats;
7101 llvm::Value *
7102 CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
7103 CodeGenFunction &CGF) {
7104 llvm::Constant *C = ConstantEmitter(CGF).emitAbstract(E, E->getType());
7105 auto *SamplerT = getOpenCLRuntime().getSamplerType(E->getType().getTypePtr());
7106 auto *FTy = llvm::FunctionType::get(SamplerT, {C->getType()}, false);
7107 auto *Call = CGF.EmitRuntimeCall(
7108 CreateRuntimeFunction(FTy, "__translate_sampler_initializer"), {C});
7109 return Call;
7112 CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
7113 QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
7114 return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
7115 /* forPointeeType= */ true);
7118 CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
7119 LValueBaseInfo *BaseInfo,
7120 TBAAAccessInfo *TBAAInfo,
7121 bool forPointeeType) {
7122 if (TBAAInfo)
7123 *TBAAInfo = getTBAAAccessInfo(T);
7125 // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
7126 // that doesn't return the information we need to compute BaseInfo.
7128 // Honor alignment typedef attributes even on incomplete types.
7129 // We also honor them straight for C++ class types, even as pointees;
7130 // there's an expressivity gap here.
7131 if (auto TT = T->getAs<TypedefType>()) {
7132 if (auto Align = TT->getDecl()->getMaxAlignment()) {
7133 if (BaseInfo)
7134 *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
7135 return getContext().toCharUnitsFromBits(Align);
7139 bool AlignForArray = T->isArrayType();
7141 // Analyze the base element type, so we don't get confused by incomplete
7142 // array types.
7143 T = getContext().getBaseElementType(T);
7145 if (T->isIncompleteType()) {
7146 // We could try to replicate the logic from
7147 // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
7148 // type is incomplete, so it's impossible to test. We could try to reuse
7149 // getTypeAlignIfKnown, but that doesn't return the information we need
7150 // to set BaseInfo. So just ignore the possibility that the alignment is
7151 // greater than one.
7152 if (BaseInfo)
7153 *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
7154 return CharUnits::One();
7157 if (BaseInfo)
7158 *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
7160 CharUnits Alignment;
7161 const CXXRecordDecl *RD;
7162 if (T.getQualifiers().hasUnaligned()) {
7163 Alignment = CharUnits::One();
7164 } else if (forPointeeType && !AlignForArray &&
7165 (RD = T->getAsCXXRecordDecl())) {
7166 // For C++ class pointees, we don't know whether we're pointing at a
7167 // base or a complete object, so we generally need to use the
7168 // non-virtual alignment.
7169 Alignment = getClassPointerAlignment(RD);
7170 } else {
7171 Alignment = getContext().getTypeAlignInChars(T);
7174 // Cap to the global maximum type alignment unless the alignment
7175 // was somehow explicit on the type.
7176 if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
7177 if (Alignment.getQuantity() > MaxAlign &&
7178 !getContext().isAlignmentRequired(T))
7179 Alignment = CharUnits::fromQuantity(MaxAlign);
7181 return Alignment;
7184 bool CodeGenModule::stopAutoInit() {
7185 unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
7186 if (StopAfter) {
7187 // This number is positive only when -ftrivial-auto-var-init-stop-after=* is
7188 // used
7189 if (NumAutoVarInit >= StopAfter) {
7190 return true;
7192 if (!NumAutoVarInit) {
7193 unsigned DiagID = getDiags().getCustomDiagID(
7194 DiagnosticsEngine::Warning,
7195 "-ftrivial-auto-var-init-stop-after=%0 has been enabled to limit the "
7196 "number of times ftrivial-auto-var-init=%1 gets applied.");
7197 getDiags().Report(DiagID)
7198 << StopAfter
7199 << (getContext().getLangOpts().getTrivialAutoVarInit() ==
7200 LangOptions::TrivialAutoVarInitKind::Zero
7201 ? "zero"
7202 : "pattern");
7204 ++NumAutoVarInit;
7206 return false;
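// Illustrative note (editorial): this implements the cap requested by, e.g.,
//
//   clang -ftrivial-auto-var-init=pattern -ftrivial-auto-var-init-stop-after=5
//
// The warning above fires once, on the first auto-initialized variable; after
// five variables have been handled, later ones are no longer auto-initialized.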
7209 void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS,
7210 const Decl *D) const {
7211 // ptxas does not allow '.' in symbol names. On the other hand, HIP prefers
7212 // a postfix beginning with '.' so that the symbol name can be demangled.
7213 if (LangOpts.HIP)
7214 OS << (isa<VarDecl>(D) ? ".static." : ".intern.");
7215 else
7216 OS << (isa<VarDecl>(D) ? "__static__" : "__intern__");
7219 // If the CUID is not specified, we try to generate a unique postfix.
7219 if (getLangOpts().CUID.empty()) {
7220 SourceManager &SM = getContext().getSourceManager();
7221 PresumedLoc PLoc = SM.getPresumedLoc(D->getLocation());
7222 assert(PLoc.isValid() && "Source location is expected to be valid.");
7225 // Get the hash of the user-defined macros.
7225 llvm::MD5 Hash;
7226 llvm::MD5::MD5Result Result;
7227 for (const auto &Arg : PreprocessorOpts.Macros)
7228 Hash.update(Arg.first);
7229 Hash.final(Result);
7231 // Get the UniqueID for the file containing the decl.
7232 llvm::sys::fs::UniqueID ID;
7233 if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
7234 PLoc = SM.getPresumedLoc(D->getLocation(), /*UseLineDirectives=*/false);
7235 assert(PLoc.isValid() && "Source location is expected to be valid.");
7236 if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
7237 SM.getDiagnostics().Report(diag::err_cannot_open_file)
7238 << PLoc.getFilename() << EC.message();
7240 OS << llvm::format("%x", ID.getFile()) << llvm::format("%x", ID.getDevice())
7241 << "_" << llvm::utohexstr(Result.low(), /*LowerCase=*/true, /*Width=*/8);
7242 } else {
7243 OS << getContext().getCUIDHash();
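// Illustrative note (editorial): without a CUID this produces postfixes
// shaped roughly like
//
//   .static.<file-id><device-id>_<8-hex-digit macro hash>     (HIP variable)
//   __intern__<file-id><device-id>_<8-hex-digit macro hash>   (non-HIP function)
//
// while with -cuid=<id> the postfix is simply the CUID hash.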
7247 void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) {
7248 assert(DeferredDeclsToEmit.empty() &&
7249 "Should have emitted all decls deferred to emit.");
7250 assert(NewBuilder->DeferredDecls.empty() &&
7251 "Newly created module should not have deferred decls");
7252 NewBuilder->DeferredDecls = std::move(DeferredDecls);
7254 assert(NewBuilder->DeferredVTables.empty() &&
7255 "Newly created module should not have deferred vtables");
7256 NewBuilder->DeferredVTables = std::move(DeferredVTables);
7258 assert(NewBuilder->MangledDeclNames.empty() &&
7259 "Newly created module should not have mangled decl names");
7260 assert(NewBuilder->Manglings.empty() &&
7261 "Newly created module should not have manglings");
7262 NewBuilder->Manglings = std::move(Manglings);
7264 NewBuilder->WeakRefReferences = std::move(WeakRefReferences);
7266 NewBuilder->TBAA = std::move(TBAA);
7268 assert(NewBuilder->EmittedDeferredDecls.empty() &&
7269 "Still have (unmerged) EmittedDeferredDecls deferred decls");
7271 NewBuilder->EmittedDeferredDecls = std::move(EmittedDeferredDecls);
7273 NewBuilder->ABI->MangleCtx = std::move(ABI->MangleCtx);