//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGHLSLRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;
CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

CGCXXABI &CodeGenTypes::getCXXABI() const { return getCGM().getCXXABI(); }

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';
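  // Illustrative note: the resulting IR name is the tag kind plus the most
  // specific name available below. For example, `namespace n { struct S {}; }`
  // would typically be named "struct.n::S", an unnamed struct behind
  // `typedef struct {} T;` becomes "struct.T", and a record with no usable
  // name at all falls back to "struct.anon".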
  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace = false;

  // Name the codegen type after the typedef name
  // if there is no tag type name available
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
///
/// We generally assume that the alloc size of this type under the LLVM
/// data layout is the same as the size of the AST type. The alignment
/// does not have to match: Clang should always use explicit alignments
/// and packed structs as necessary to produce the layout it needs.
/// But the size does need to be exactly right or else things like struct
/// layout will break.
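///
/// Illustrative examples (assuming a typical target): a bool
/// ext_vector_type(4) is <4 x i1> as a scalar value but is stored in memory
/// as a padded integer (i8 here), and a constant matrix is stored as a flat
/// array of its elements rather than as the single vector ConvertType
/// produces.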
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
  // with the exact semantic bit-width of the AST type; for example,
  // _BitInt(17) will turn into i17. In memory, however, we need to store
  // such values extended to their full storage size as decided by AST
  // layout; this is an ABI requirement. Ideally, we would always use an
  // integer type that's just the bit-size of the AST type; for example, if
  // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
  // returned by convertTypeForLoadStore. However, that type does not
  // always satisfy the size requirement on memory representation types
  // described above. For example, a 32-bit platform might reasonably set
  // sizeof(_BitInt(65)) == 12, but i96 is likely to have to have an alloc size
  // of 16 bytes in the LLVM data layout. In these cases, we simply return
  // a byte array of the appropriate size.
  if (T->isBitIntType()) {
    if (typeRequiresSplitIntoByteArray(T, R))
      return llvm::ArrayType::get(CGM.Int8Ty,
                                  Context.getTypeSizeInChars(T).getQuantity());
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));
  }

  if (R->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}
bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(ASTTy);

  CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
  CharUnits LLVMSize =
      CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
  return ASTSize != LLVMSize;
}
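
/// Sketch of the intended contrast with ConvertTypeForMem (illustrative, for
/// a typical target): loads and stores of a _BitInt(17) use i32 (the full
/// storage size in bits), whereas the in-memory type may instead be a byte
/// array when the rounded-up integer's alloc size would overshoot the AST
/// size, as described above.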
llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
                                                  llvm::Type *LLVMTy) {
  if (!LLVMTy)
    LLVMTy = ConvertType(T);

  if (T->isBitIntType())
    return llvm::Type::getIntNTy(
        getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);

  if (LLVMTy->isIntegerTy(1))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  if (T->isExtVectorBoolType())
    return ConvertTypeForMem(T);

  return LLVMTy;
}
/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
    RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}
/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This essentially boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}
/// Code to verify a given function type is complete, i.e. the return type
/// and all of the parameter types are complete. Also check to see if we are in
/// a RS_StructPointer context, and if so whether any struct types have been
/// pended. If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
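///
/// For example (illustrative): a function declared as
/// `struct Incomplete foo(void);` is not convertible until `struct Incomplete`
/// has been defined, since the return type's layout is not yet known.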
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}
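
/// Map a float semantics description onto the corresponding LLVM IR floating
/// point type. For instance, on typical x86-64 Linux targets `long double`
/// uses x87DoubleExtended semantics and therefore maps to x86_fp80, while
/// IEEEquad maps to fp128.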
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }

  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}
llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
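    // (An empty literal struct; illustratively, a function whose return type
    // is a forward-declared record gets this placeholder until the record is
    // completed and the cache is flushed, at which point the real function
    // type is recomputed.)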
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {

    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {

    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}
/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For the device-side compilation, CUDA device builtin surface/texture types
  // may be represented in different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks, check that the type we compute matches the
  // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");
  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;
    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      [[fallthrough]];
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId) \
    case BuiltinType::Id:
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId) \
    case BuiltinType::Id:
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue() *
                                                 Info.NumVectors);
      }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
        llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        if (Info.NumVectors != 1) {
          unsigned I8EltCount =
              Info.EC.getKnownMinValue() *
              ConvertType(Info.ElementType)->getScalarSizeInBits() / 8;
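          // Illustrative example (exact factors depend on the type): a
          // two-field tuple such as vint32m1x2_t, whose fields are
          // <vscale x 2 x i32>, would give I8EltCount = 2 * 32 / 8 = 8 and
          // lower to target("riscv.vector.tuple", <vscale x 8 x i8>, 2).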
          return llvm::TargetExtType::get(
              getLLVMContext(), "riscv.vector.tuple",
              llvm::ScalableVectorType::get(
                  llvm::Type::getInt8Ty(getLLVMContext()), I8EltCount),
              Info.NumVectors);
        }
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue());
      }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
    case BuiltinType::Id: { \
      if (BuiltinType::Id == BuiltinType::WasmExternRef) \
        ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
      else \
        llvm_unreachable("Unexpected wasm reference builtin type!"); \
    } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_OPAQUE_PTR_TYPE(Name, MangledName, AS, Width, Align, Id, \
                               SingletonId) \
    case BuiltinType::Id: \
      return llvm::PointerType::get(getLLVMContext(), AS);
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      ResultType = CGM.getHLSLRuntime().convertHLSLSpecificType(Ty);
      break;
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ArrayParameter:
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isExtVectorBoolType()
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // Block pointers lower to function type. For function type,
    // getTargetAddressSpace() returns default address space for
    // function pointer i.e. program address space. Therefore, for block
    // pointers, it is important to pass the pointee AST address space when
    // calling getTargetAddressSpace(), to ensure that we get the LLVM IR
    // address space for data pointers and not function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
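    // For instance (illustrative; exact sizes are target-dependent): an
    // _Atomic(struct { char c[3]; }) typically has a 3-byte value but a
    // 4-byte atomic size, so it is lowered to { <value type>, [1 x i8] }.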
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}
bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}
/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecl's are not necessarily unique, instead use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
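  // (Note: creating and caching the type before the fields are laid out means
  // that any nested conversion request for the same record, e.g. from a
  // self-referential field, simply finds this existing forward declaration
  // instead of recursing.)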
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}
/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}
bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}
bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
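  // (For example, under the Itanium C++ ABI a null pointer to data member is
  // represented as -1 rather than 0, so such a member makes the enclosing
  // record not zero-initializable.)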
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}
unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a
  // function type without an address space qualifier, the
  // program address space is used. Otherwise, the target picks
  // the best address space based on the type information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}