//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements AArch64 TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/ARMTargetParserCommon.h"

using namespace clang;
using namespace clang::targets;

static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
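
// For illustration, assuming a typical .def entry: a line such as
//   BUILTIN(__builtin_arm_dmb, "vUi", "nc")
// in BuiltinsAArch64.def expands under the BUILTIN macro above to
//   {"__builtin_arm_dmb", "vUi", "nc", nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
// so the table collects one Builtin::Info record per builtin.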

void AArch64TargetInfo::setArchFeatures() {
  if (*ArchInfo == llvm::AArch64::ARMV8R) {
    HasDotProd = true;
    HasDIT = true;
    HasFlagM = true;
    HasRCPC = true;
    FPU |= NeonMode;
    HasCCPP = true;
    HasCRC = true;
    HasLSE = true;
    HasRDM = true;
  } else if (ArchInfo->Version.getMajor() == 8) {
    if (ArchInfo->Version.getMinor() >= 7u) {
      HasWFxT = true;
    }
    if (ArchInfo->Version.getMinor() >= 6u) {
      HasBFloat16 = true;
      HasMatMul = true;
    }
    if (ArchInfo->Version.getMinor() >= 5u) {
      HasAlternativeNZCV = true;
      HasFRInt3264 = true;
      HasSSBS = true;
      HasSB = true;
      HasPredRes = true;
      HasBTI = true;
    }
    if (ArchInfo->Version.getMinor() >= 4u) {
      HasDotProd = true;
      HasDIT = true;
      HasFlagM = true;
    }
    if (ArchInfo->Version.getMinor() >= 3u) {
      HasRCPC = true;
      FPU |= NeonMode;
    }
    if (ArchInfo->Version.getMinor() >= 2u) {
      HasCCPP = true;
    }
    if (ArchInfo->Version.getMinor() >= 1u) {
      HasCRC = true;
      HasLSE = true;
      HasRDM = true;
    }
  } else if (ArchInfo->Version.getMajor() == 9) {
    if (ArchInfo->Version.getMinor() >= 2u) {
      HasWFxT = true;
    }
    if (ArchInfo->Version.getMinor() >= 1u) {
      HasBFloat16 = true;
      HasMatMul = true;
    }
    FPU |= SveMode;
    HasSVE2 = true;
    HasFullFP16 = true;
    HasAlternativeNZCV = true;
    HasFRInt3264 = true;
    HasSSBS = true;
    HasSB = true;
    HasPredRes = true;
    HasBTI = true;
    HasDotProd = true;
    HasDIT = true;
    HasFlagM = true;
    HasRCPC = true;
    FPU |= NeonMode;
    HasCCPP = true;
    HasCRC = true;
    HasLSE = true;
    HasRDM = true;
  }
}
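
// Worked example: for ArchInfo == ARMV9A the Armv9 branch above enables the
// full Armv8.5-level feature set plus SVE2; for ArchInfo == ARMV8_6A the
// minor-version checks are cumulative, so the 8.6 block runs together with
// the 8.5, 8.4, ... blocks, matching the "each minor version includes all
// earlier ones" architecture rule.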

AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HalfArgsAndReturns = true;

  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available. Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit. It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}

StringRef AArch64TargetInfo::getABI() const { return ABI; }

bool AArch64TargetInfo::setABI(const std::string &Name) {
  if (Name != "aapcs" && Name != "darwinpcs")
    return false;

  ABI = Name;
  return true;
}

bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
                                                 BranchProtectionInfo &BPI,
                                                 StringRef &Err) const {
  llvm::ARM::ParsedBranchProtection PBP;
  if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
    return false;

  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  return true;
}
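
// Example: the spec "pac-ret+leaf+b-key" parses to Scope == "all" and
// Key == "b_key", so this sets SignReturnAddr to All and SignKey to BKey,
// while plain "pac-ret" would give NonLeaf with the default a_key.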

bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
  return Name == "generic" || llvm::AArch64::parseCpu(Name);
}

bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}

void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}

void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
  Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
  Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}

void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  Builder.defineMacro("__ARM_FEATURE_BTI", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");

  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH",
                      std::to_string(ArchInfo->Version.getMajor()));
  Builder.defineMacro("__ARM_ARCH_PROFILE",
                      std::string("'") + (char)ArchInfo->Profile + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV"); // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  if (HasRCPC)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "1");

  if (HasFMV)
    Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasPAuth)
    Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  if (HasD128)
    Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");

  if (*ArchInfo == llvm::AArch64::ARMV8_1A)
    getTargetDefinesARMV81A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
    getTargetDefinesARMV82A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
    getTargetDefinesARMV83A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
    getTargetDefinesARMV84A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
    getTargetDefinesARMV85A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
    getTargetDefinesARMV86A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
    getTargetDefinesARMV87A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
    getTargetDefinesARMV88A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
    getTargetDefinesARMV89A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9A)
    getTargetDefinesARMV9A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
    getTargetDefinesARMV91A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
    getTargetDefinesARMV92A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
    getTargetDefinesARMV93A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
    getTargetDefinesARMV94A(Opts, Builder);

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // Allow detection of fast FMA support.
  Builder.defineMacro("__FP_FAST_FMA", "1");
  Builder.defineMacro("__FP_FAST_FMAF", "1");

  // C/C++ operators work on both VLS and VLA SVE types
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");

  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
  }
}
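
// Example: -msve-vector-bits=512 sets VScaleMin == VScaleMax == 4, so the
// block above defines __ARM_FEATURE_SVE_BITS to 4 * 128 == 512.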

ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                         Builtin::FirstTSBuiltin);
}

std::optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
  if (LangOpts.VScaleMin || LangOpts.VScaleMax)
    return std::pair<unsigned, unsigned>(
        LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);

  if (hasFeature("sve"))
    return std::pair<unsigned, unsigned>(1, 16);

  return std::nullopt;
}
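
// An SVE register is vscale x 128 bits, so the default (1, 16) range above
// corresponds to vector lengths from 128 to 2048 bits, the architectural
// maximum.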

unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
  if (Name == "default")
    return 0;
  for (const auto &E : llvm::AArch64::Extensions)
    if (Name == E.Name)
      return E.FmvPriority;
  return 0;
}

unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
  // Take the maximum priority as per feature cost, so more features win.
  return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
}

bool AArch64TargetInfo::getFeatureDepOptions(StringRef Name,
                                             std::string &FeatureVec) const {
  FeatureVec = "";
  for (const auto &E : llvm::AArch64::Extensions) {
    if (Name == E.Name) {
      FeatureVec = E.DependentFeatures;
      break;
    }
  }
  return FeatureVec != "";
}

bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
  for (const auto &E : llvm::AArch64::Extensions)
    if (FeatureStr == E.Name)
      return true;
  return false;
}

bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("fmv", HasFMV)
      .Cases("neon", "fp", "simd", FPU & NeonMode)
      .Case("jscvt", HasJSCVT)
      .Case("fcma", HasFCMA)
      .Case("rng", HasRandGen)
      .Case("flagm", HasFlagM)
      .Case("flagm2", HasAlternativeNZCV)
      .Case("fp16fml", HasFP16FML)
      .Case("dotprod", HasDotProd)
      .Case("sm4", HasSM4)
      .Case("rdm", HasRDM)
      .Case("lse", HasLSE)
      .Case("crc", HasCRC)
      .Case("sha2", HasSHA2)
      .Case("sha3", HasSHA3)
      .Cases("aes", "pmull", HasAES)
      .Cases("fp16", "fullfp16", HasFullFP16)
      .Case("dit", HasDIT)
      .Case("dpb", HasCCPP)
      .Case("dpb2", HasCCDP)
      .Case("rcpc", HasRCPC)
      .Case("frintts", HasFRInt3264)
      .Case("i8mm", HasMatMul)
      .Case("bf16", HasBFloat16)
      .Case("sve", FPU & SveMode)
      .Case("sve-bf16", FPU & SveMode && HasBFloat16)
      .Case("sve-i8mm", FPU & SveMode && HasMatMul)
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
      .Case("sve2", FPU & SveMode && HasSVE2)
      .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
      .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
      .Case("sme", HasSME)
      .Case("sme-f64f64", HasSMEF64)
      .Case("sme-i16i64", HasSMEI64)
      .Cases("memtag", "memtag2", HasMTE)
      .Case("sb", HasSB)
      .Case("predres", HasPredRes)
      .Cases("ssbs", "ssbs2", HasSSBS)
      .Case("bti", HasBTI)
      .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
      .Case("wfxt", HasWFxT)
      .Default(false);
}
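
// Note the compound cases: hasFeature("sve2-pmull128"), for example, is only
// true when SVE is enabled in FPU *and* HasSVE2AES is set, mirroring the
// __ARM_FEATURE_SVE2_AES macro logic in getTargetDefines above.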

void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
                                          StringRef Name, bool Enabled) const {
  Features[Name] = Enabled;
  // If the feature is an architecture feature (like v8.2a), add all previous
  // architecture versions and any dependent target features.
  const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
      llvm::AArch64::ArchInfo::findBySubArch(Name);

  if (!ArchInfo)
    return; // Not an architecture, nothing more to do.

  // Disabling an architecture feature does not affect dependent features
  if (!Enabled)
    return;

  for (const auto *OtherArch : llvm::AArch64::ArchInfos)
    if (ArchInfo->implies(*OtherArch))
      Features[OtherArch->getSubArch()] = true;

  // Set any features implied by the architecture
  std::vector<StringRef> CPUFeats;
  if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
    for (auto F : CPUFeats) {
      assert(F[0] == '+' && "Expected + in target feature!");
      Features[F.drop_front(1)] = true;
    }
  }
}
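
// Example: setFeatureEnabled(Features, "v8.2a", true) marks "v8.2a" itself,
// every sub-arch it implies ("v8.1a", "v8a"), and each '+'-prefixed default
// extension of Armv8.2-A in the Features map.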

bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  for (const auto &Feature : Features) {
    if (Feature == "-neon")
      HasNoNeon = true;
    if (Feature == "-sve")
      HasNoSVE = true;

    if (Feature == "+neon" || Feature == "+fp-armv8")
      FPU |= NeonMode;
    if (Feature == "+jscvt") {
      HasJSCVT = true;
      FPU |= NeonMode;
    }
    if (Feature == "+fcma") {
      HasFCMA = true;
      FPU |= NeonMode;
    }
    if (Feature == "+sve") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2-aes") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2AES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2BitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP64 = true;
    }
    if (Feature == "+sme") {
      HasSME = true;
      HasBFloat16 = true;
    }
    if (Feature == "+sme-f64f64") {
      HasSME = true;
      HasBFloat16 = true;
      HasSMEF64 = true;
    }
    if (Feature == "+sme-i16i64") {
      HasSME = true;
      HasBFloat16 = true;
      HasSMEI64 = true;
    }
    if (Feature == "+sb")
      HasSB = true;
    if (Feature == "+predres")
      HasPredRes = true;
    if (Feature == "+ssbs")
      HasSSBS = true;
    if (Feature == "+bti")
      HasBTI = true;
    if (Feature == "+wfxt")
      HasWFxT = true;
    if (Feature == "-fmv")
      HasFMV = false;
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+rcpc")
      HasRCPC = true;
    if (Feature == "+aes") {
      FPU |= NeonMode;
      HasAES = true;
    }
    if (Feature == "+sha2") {
      FPU |= NeonMode;
      HasSHA2 = true;
    }
    if (Feature == "+sha3") {
      FPU |= NeonMode;
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+rdm") {
      FPU |= NeonMode;
      HasRDM = true;
    }
    if (Feature == "+dit")
      HasDIT = true;
    if (Feature == "+cccp")
      HasCCPP = true;
    if (Feature == "+ccdp") {
      HasCCPP = true;
      HasCCDP = true;
    }
    if (Feature == "+fptoint")
      HasFRInt3264 = true;
    if (Feature == "+sm4") {
      FPU |= NeonMode;
      HasSM4 = true;
    }
    if (Feature == "+strict-align")
      HasUnaligned = false;
    // All predecessor archs are added but select the latest one for ArchKind.
    if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8A;
    if (Feature == "+v8.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_1A;
    if (Feature == "+v8.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_2A;
    if (Feature == "+v8.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_3A;
    if (Feature == "+v8.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_4A;
    if (Feature == "+v8.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_5A;
    if (Feature == "+v8.6a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_6A;
    if (Feature == "+v8.7a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_7A;
    if (Feature == "+v8.8a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_8A;
    if (Feature == "+v8.9a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_9A;
    if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
      ArchInfo = &llvm::AArch64::ARMV9A;
    if (Feature == "+v9.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_1A;
    if (Feature == "+v9.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_2A;
    if (Feature == "+v9.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_3A;
    if (Feature == "+v9.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_4A;
    if (Feature == "+v8r")
      ArchInfo = &llvm::AArch64::ARMV8R;
    if (Feature == "+fullfp16") {
      FPU |= NeonMode;
      HasFullFP16 = true;
    }
    if (Feature == "+dotprod") {
      FPU |= NeonMode;
      HasDotProd = true;
    }
    if (Feature == "+fp16fml") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasFP16FML = true;
    }
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+altnzcv") {
      HasFlagM = true;
      HasAlternativeNZCV = true;
    }
    if (Feature == "+mops")
      HasMOPS = true;
    if (Feature == "+d128")
      HasD128 = true;
  }

  // Check features that are manually disabled by command line options.
  // This needs to be checked after architecture-related features are handled,
  // making sure they are properly disabled when required.
  for (const auto &Feature : Features) {
    if (Feature == "-d128")
      HasD128 = false;
  }

  setDataLayout();
  setArchFeatures();

  if (HasNoNeon) {
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoSVE)
    FPU &= ~SveMode;

  return true;
}

bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
  if (CpuInfo) {
    uint64_t Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features,
  // second to add target '+/-'features that can later disable some of
  // features added on the first loop.
  for (const auto &Feature : FeaturesVec)
    if ((Feature[0] == '?' || Feature[0] == '+')) {
      std::string Options;
      if (AArch64TargetInfo::getFeatureDepOptions(Feature.substr(1), Options)) {
        SmallVector<StringRef, 1> AttrFeatures;
        StringRef(Options).split(AttrFeatures, ",");
        for (auto F : AttrFeatures)
          UpdatedFeaturesVec.push_back(F.str());
      }
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
            llvm::AArch64::parseArchExtension(Feature.substr(1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
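
// Example of the two-loop behavior: for FeaturesVec == {"+sve2"}, the first
// loop appends sve2's DependentFeatures (e.g. "+sve"), and the second loop
// then appends the canonical "+sve2" itself via parseArchExtension.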

// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Features.push_back(FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.startswith("no"))
          Features.push_back("-" + Feature.drop_front(2).str());
        else
          Features.push_back("+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    if (Feature.startswith("fpmath="))
      continue;

    if (Feature.startswith("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.startswith("arch=")) {
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const std::optional<llvm::AArch64::ArchInfo> AI =
          llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.startswith("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.startswith("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.startswith("+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.startswith("no-")) {
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(Feature.split("-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
      else
        Ret.Features.push_back("-" + Feature.split("-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(FeatureName.str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  return Ret;
}
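
// Example: __attribute__((target("arch=armv8.2-a+dotprod,tune=cortex-a710")))
// is parsed here to Ret.Features == {"+v8.2a", "+dotprod"} and
// Ret.Tune == "cortex-a710".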

bool AArch64TargetInfo::hasBFloat16Type() const {
  return true;
}

TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }

TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}

const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15"
};

ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}

const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    // GCC rN registers are aliases of xN registers.
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};

ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}

bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
}
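
// Example: asm("fmla %0.4s, %1.4s, %2.4s" : "+w"(acc) : "w"(a), "w"(b))
// reaches the 'w' case above and may use any of V0-V31, while the narrower
// 'x' constraint restricts the operand to V0-V15.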

bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}

const char *AArch64TargetInfo::getClobbers() const { return ""; }

int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
  if (RegNo == 0)
    return 0;
  if (RegNo == 1)
    return 1;
  return -1;
}

bool AArch64TargetInfo::hasInt128Type() const { return true; }

AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64leTargetInfo::setDataLayout() {
  if (getTriple().isOSBinFormatMachO()) {
    if (getTriple().isArch32Bit())
      resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
    else
      resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
  } else
    resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}

void WindowsARM64TargetInfo::setDataLayout() {
  resetDataLayout(Triple.isOSBinFormatMachO()
                      ? "e-m:o-i64:64-i128:128-n32:64-S128"
                      : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
                  Triple.isOSBinFormatMachO() ? "_" : "");
}

TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}

void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}

TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}

unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {            // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);  // align type at least 16 bytes
  } else if (TypeSize >= 64) {      // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);   // align type at least 8 bytes
  } else if (TypeSize >= 16) {      // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);   // align type at least 4 bytes
  }
  return Align;
}
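
// Example: a 32-byte global (TypeSize == 256 bits) falls into the middle
// bucket above and is aligned to at least 64 bits (8 bytes), matching MSVC.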

MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}

DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}

void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}

TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}

void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}