//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class ARMABIInfo : public ABIInfo {
  ARMABIKind Kind;
  bool IsFloatABISoftFP;

public:
  ARMABIInfo(CodeGenTypes &CGT, ARMABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
    setCCs();
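    // Note: "softfp" selects hardware FP instructions but the soft-float
    // calling convention, so for argument passing it behaves like the
    // default soft ABI.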
    IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
                       CGT.getCodeGenOpts().FloatABI == ""; // default
  }
  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return getTarget().getTriple().isOHOSFamily();
    }
  }
  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }
  ARMABIKind getABIKind() const { return Kind; }

  bool allowBFloatArgsAndRet() const override {
    return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
  }
private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();
};
class ARMSwiftABIInfo : public SwiftABIInfo {
public:
  explicit ARMSwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {
    SwiftInfo = std::make_unique<ARMSwiftABIInfo>(CGT);
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo<ARMABIInfo>().isEABI())
      return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
    auto *Fn = cast<llvm::Function>(GV);

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        TargetInfo::BranchProtectionInfo BPI;
        StringRef DiagMsg;
        StringRef Arch =
            Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
        if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                      Arch, BPI, DiagMsg)) {
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Arch;
        } else {
          static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
          assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
                 "Unexpected SignReturnAddressScopeKind");
          Fn->addFnAttr(
              "sign-return-address",
              SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

          Fn->addFnAttr("branch-target-enforcement",
                        BPI.BranchTargetEnforcement ? "true" : "false");
        }
      } else if (CGM.getLangOpts().BranchTargetEnforcement ||
                 CGM.getLangOpts().hasSignReturnAddress()) {
        // If the Branch Protection attribute is missing, validate the target
        // Architecture attribute against Branch Protection command line
        // settings.
        if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.CPU))
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Attr.CPU;
      }
    }
    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
    }

    Fn->addFnAttr("interrupt", Kind);
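    // For example, a handler declared as
    //   __attribute__((interrupt("IRQ"))) void irq_handler(void);
    // arrives here with Kind == "IRQ"; the "interrupt" function attribute
    // tells the backend to emit the matching prologue/epilogue.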
    ARMABIKind ABI = getABIInfo<ARMABIInfo>().getABIKind();
    if (ABI == ARMABIKind::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
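    // (The realignment request below is emitted as an alignstack(8) function
    // attribute in the IR.)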
    llvm::AttrBuilder B(Fn->getContext());
    B.addStackAlignmentAttr(8);
    Fn->addFnAttrs(B);
  }
};
class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }
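  // For example, #pragma comment(lib, "msvcrt") becomes the linker directive
  // "/DEFAULTLIB:msvcrt.lib", and #pragma detect_mismatch("myLib_version",
  // "9") below becomes "/FAILIFMISMATCH:\"myLib_version=9\"", both embedded
  // in the object file for the linker to act on.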
  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
} // namespace
void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
                                            FI.getCallingConvention());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                  FI.getCallingConvention());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}
/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}
/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case ARMABIKind::APCS:
    return llvm::CallingConv::ARM_APCS;
  case ARMABIKind::AAPCS:
    return llvm::CallingConv::ARM_AAPCS;
  case ARMABIKind::AAPCS_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  case ARMABIKind::AAPCS16_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}
void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}
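// Illegal vectors are rewritten in terms of 32-bit integer lanes. For
// example, a 24-bit <3 x i8> widens to a single i32, a 64-bit <8 x i8>
// becomes <2 x i32>, and any other size above 32 bits (say, a 96-bit
// <3 x i32>) is passed indirectly.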
ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    auto *ResType = llvm::FixedVectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
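// For example, struct Quad { float x, y, z, w; }; is a homogeneous aggregate
// with Base = float and Members = 4, and so is eligible for the VFP registers
// s0-s3 under AAPCS-VFP.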
ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors.
    if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
      uint64_t Size = getContext().getTypeSize(VT);
      auto *NewVecTy = llvm::FixedVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  unsigned Align = 0;
  if (getABIKind() == ARMABIKind::AAPCS ||
      getABIKind() == ARMABIKind::AAPCS_VFP) {
    // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
    // default otherwise.
    Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            unsigned functionCallConv) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();
  if (IsAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
  if (getABIKind() == ARMABIKind::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128-bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }
  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIKind::AAPCS_VFP ||
      getABIKind() == ARMABIKind::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::clamp(TyAlign, (uint64_t)4, (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }

  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIKind::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }
  // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
  // same size and alignment.
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }
  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
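  // For example, a 12-byte struct with 4-byte alignment is coerced to
  // [3 x i32]: (96 + 31) / 32 == 3 registers.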
  if (TyAlign <= 4) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
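// For example, union U { int i; char c; }; is integer-like (every addressable
// field sits at offset zero), while struct S { short a, b; }; is not, because
// b lives at a non-zero offset.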
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.
  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
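// For example, under AAPCS a two-byte struct is returned directly as an i16
// on little-endian targets, but as an i32 (as if loaded by LDR) when the
// target is big-endian.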
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
                                          unsigned functionCallConv) const {

  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // TODO: FP16/BF16 vectors should be converted to integer vectors
    // This check is similar to isIllegalVectorType - refactor?
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return coerceIllegalVector(RetTy);
  }
  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }
  // Are we following APCS?
  if (getABIKind() == ARMABIKind::APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }
  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }
  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == ARMABIKind::AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}
/// isIllegalVectorType - check whether Ty is an illegal vector type.
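/// For example, <3 x float> is illegal on most targets (element count not a
/// power of two) but accepted on Android for legacy reasons, and a 64-bit
/// <4 x half> is illegal whenever the target lacks native half support.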
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On targets that don't support half, fp16 or bfloat, they are expanded
    // into float, and we don't want the ABI to depend on whether or not they
    // are supported in hardware. Thus return true to coerce vectors of these
    // types into integer vectors.
    // We do not depend on hasLegalHalfType for bfloat as it is a
    // separate IR type.
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}
/// Return true if a type contains any 16-bit floating point vectors.
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isBFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}
bool ARMSwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                                        unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  unsigned size = CGT.getDataLayout().getTypeStoreSizeInBits(EltTy);
  if (size > 64)
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}
bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}
bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}
bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // AAPCS32 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
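  // For example, struct S { float x; int : 0; float y; }; remains a
  // homogeneous aggregate of two floats.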
  return true;
}
bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
                                        bool acceptHalf) const {
  // Give precedence to user-specified calling conventions.
  if (callConvention != llvm::CallingConv::C)
    return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
  else
    return (getABIKind() == ARMABIKind::AAPCS_VFP) ||
           (acceptHalf && (getABIKind() == ARMABIKind::AAPCS16_VFP));
}
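// On AAPCS targets va_list is just a pointer into the caller's argument area;
// slots are 4 bytes, and over-aligned types only get the bounded alignment
// computed below.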
Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
    return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

  // ARMv7k passes structs bigger than 16 bytes indirectly, in space
  // allocated by the caller.
  } else if (TySize > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIKind::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;

  // Otherwise, bound the type's ABI alignment.
  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  // Our callers should be prepared to handle an under-aligned address.
  } else if (getABIKind() == ARMABIKind::AAPCS_VFP ||
             getABIKind() == ARMABIKind::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // ARMv7k allows type alignment up to 16 bytes.
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }

  TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
                          SlotSize, /*AllowHigherAlign*/ true);
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind) {
  return std::make_unique<ARMTargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K) {
  return std::make_unique<WindowsARMTargetCodeGenInfo>(CGM.getTypes(), K);
}