//===- ABIInfoImpl.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"

using namespace clang;
using namespace clang::CodeGen;

// Pin the vtable to this file.
DefaultABIInfo::~DefaultABIInfo() = default;

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ASTContext &Context = getContext();
  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() >
        Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
                                ? Context.Int128Ty
                                : Context.LongLongTy))
      return getNaturalAlignIndirect(Ty);

  return (isPromotableIntegerTypeForABI(Ty)
              ? ABIArgInfo::getExtend(Ty, CGT.ConvertType(Ty))
              : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() >
        getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
                                     ? getContext().Int128Ty
                                     : getContext().LongLongTy))
      return getNaturalAlignIndirect(RetTy);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

void DefaultABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

RValue DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty, AggValueSlot Slot) const {
  return CGF.EmitLoadOfAnyValue(
      CGF.MakeAddrLValue(
          EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)), Ty),
      Slot);
}

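// Build a direct ABIArgInfo that passes Ty as an array of alignment-sized
// integers wide enough to cover the whole type.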
ABIArgInfo CodeGen::coerceToIntArray(QualType Ty, ASTContext &Context,
                                     llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

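// Store Value into Array at each byte offset in the inclusive range
// [FirstIndex, LastIndex].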
void CodeGen::AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                                 llvm::Value *Array, llvm::Value *Value,
                                 unsigned FirstIndex, unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

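// A type is an aggregate for ABI purposes if it is not evaluated as a scalar,
// or if it is a member function pointer.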
bool CodeGen::isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

llvm::Type *CodeGen::getVAListElementType(CodeGenFunction &CGF) {
  return CGF.ConvertTypeForMem(
      CGF.getContext().getBuiltinVaListType()->getPointeeType());
}

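// Determine how a record argument must be passed. Non-C++ records fall back
// to RecordDecl::canPassInRegisters(); C++ records defer to the C++ ABI.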
CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(const RecordType *RT,
                                                CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(QualType T, CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

bool CodeGen::classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                                 const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

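// Pass transparent unions as if they were the type of their first field.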
QualType CodeGen::useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

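// Round Ptr up to a multiple of Align using a GEP followed by llvm.ptrmask.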
llvm::Value *CodeGen::emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                    llvm::Value *Ptr,
                                                    CharUnits Align) {
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  llvm::Value *RoundUp = CGF.Builder.CreateConstInBoundsGEP1_32(
      CGF.Builder.getInt8Ty(), Ptr, Align.getQuantity() - 1);
  return CGF.Builder.CreateIntrinsic(
      llvm::Intrinsic::ptrmask, {Ptr->getType(), CGF.IntPtrTy},
      {RoundUp, llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())},
      nullptr, Ptr->getName() + ".aligned");
}

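// Compute the slot address of the next variadic argument for targets whose
// va_list is a plain pointer into an argument area, and bump the pointer past
// the consumed slot(s).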
Address CodeGen::emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                        llvm::Type *DirectTy, CharUnits DirectSize,
                                        CharUnits DirectAlign, CharUnits SlotSize,
                                        bool AllowHigherAlign, bool ForceRightAdjust) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   CGF.Int8Ty, DirectAlign);
  } else {
    Addr = Address(Ptr, CGF.Int8Ty, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  Address NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
  CGF.Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      (!DirectTy->isStructTy() || ForceRightAdjust)) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  return Addr.withElementType(DirectTy);
}

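// Emit va_arg for a void*-style va_list, handling values passed either
// directly in the argument area or indirectly by pointer, and load the result.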
RValue CodeGen::emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType ValueTy, bool IsIndirect,
                                 TypeInfoChars ValueInfo,
                                 CharUnits SlotSizeAndAlign,
                                 bool AllowHigherAlign, AggValueSlot Slot,
                                 bool ForceRightAdjust) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.Width;
    DirectAlign = ValueInfo.Align;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy), *ElementTy = DirectTy;
  if (IsIndirect) {
    unsigned AllocaAS = CGF.CGM.getDataLayout().getAllocaAddrSpace();
    DirectTy = llvm::PointerType::get(CGF.getLLVMContext(), AllocaAS);
  }

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, DirectSize,
                                        DirectAlign, SlotSizeAndAlign,
                                        AllowHigherAlign, ForceRightAdjust);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ElementTy, ValueInfo.Align);
  }

  return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(Addr, ValueTy), Slot);
}

Address CodeGen::emitMergePHI(CodeGenFunction &CGF, Address Addr1,
                              llvm::BasicBlock *Block1, Address Addr2,
                              llvm::BasicBlock *Block2,
                              const llvm::Twine &Name) {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.emitRawPointer(CGF), Block1);
  PHI->addIncoming(Addr2.emitRawPointer(CGF), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Addr1.getElementType(), Align);
}

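// Return true if the field is "empty": an unnamed bit-field or an (array of)
// empty record(s), subject to the [[no_unique_address]] handling below.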
bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
                           bool AllowArrays, bool AsIfNoUniqueAddr) {
  if (FD->isUnnamedBitField())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  bool WasArray = false;
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->isZeroSize())
        return true;
      FT = AT->getElementType();
      // The [[no_unique_address]] special case below does not apply to
      // arrays of C++ empty records, so we need to remember this fact.
      WasArray = true;
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  //
  // The exception to the above rule are fields marked with the
  // [[no_unique_address]] attribute (since C++20). Those do count as empty
  // according to the Itanium ABI. The exception applies only to records,
  // not arrays of records, so we must also check whether we stripped off an
  // array type above.
  if (isa<CXXRecordDecl>(RT->getDecl()) &&
      (WasArray || (!AsIfNoUniqueAddr && !FD->hasAttr<NoUniqueAddressAttr>())))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays, AsIfNoUniqueAddr);
}

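// Return true if the record contains only empty bases and fields; a record
// with a flexible array member is never considered empty.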
bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
                            bool AsIfNoUniqueAddr) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true, AsIfNoUniqueAddr))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays, AsIfNoUniqueAddr))
      return false;
  return true;
}

bool CodeGen::isEmptyFieldForLayout(const ASTContext &Context,
                                    const FieldDecl *FD) {
  if (FD->isZeroLengthBitField(Context))
    return true;

  if (FD->isUnnamedBitField())
    return false;

  return isEmptyRecordForLayout(Context, FD->getType());
}

bool CodeGen::isEmptyRecordForLayout(const ASTContext &Context, QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (CXXRD->isDynamicClass())
      return false;

    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecordForLayout(Context, I.getType()))
        return false;
  }

  for (const auto *I : RD->fields())
    if (!isEmptyFieldForLayout(Context, I))
      return false;

  return true;
}

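// If the struct effectively wraps exactly one non-empty element (possibly
// nested, possibly a single-element array), return that element's type;
// otherwise return nullptr.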
const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getZExtSize() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)

  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.Align;

    llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
    llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF), BaseTy);
    return Address(Addr, ElementTy, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF),
                                  CGF.ConvertTypeForMem(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

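// A SIMD vector type, for these helpers, is any vector type that is exactly
// 128 bits wide.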
bool CodeGen::isSIMDVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

bool CodeGen::isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSIMDVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSIMDVectorType(Context, FT))
      return true;

    if (isRecordWithSIMDVectorType(Context, FT))