1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements AArch64 TargetInfo objects.
11 //===----------------------------------------------------------------------===//
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/TargetParser/AArch64TargetParser.h"
21 #include "llvm/TargetParser/ARMTargetParserCommon.h"
24 using namespace clang
;
25 using namespace clang::targets
;
// Table of all AArch64 target builtins, assembled via X-macros from the
// NEON, SVE, SME and generic AArch64 .def files, in that order.
// NOTE(review): this paste is mangled (line numbers fused into the text,
// statements split) and the closing "};" of the array is not visible here.
27 static constexpr Builtin::Info BuiltinInfo
[] = {
28 #define BUILTIN(ID, TYPE, ATTRS) \
29 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
30 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
31 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
32 #include "clang/Basic/BuiltinsNEON.def"
// SVE builtins follow the NEON ones.
34 #define BUILTIN(ID, TYPE, ATTRS) \
35 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
36 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
37 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
38 #include "clang/Basic/BuiltinsSVE.def"
// SME builtins follow the SVE ones.
40 #define BUILTIN(ID, TYPE, ATTRS) \
41 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
42 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
43 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
44 #include "clang/Basic/BuiltinsSME.def"
// Generic AArch64 builtins, including language- and header-gated ones.
46 #define BUILTIN(ID, TYPE, ATTRS) \
47 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
48 #define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
49 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
50 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
51 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
52 #define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
53 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
54 #include "clang/Basic/BuiltinsAArch64.def"
// Set default target features implied by the selected architecture version:
// v8-R is special-cased, then v8.x and v9.x minor versions each fall through
// so a higher minor enables everything a lower minor does.
// NOTE(review): many feature assignments and closing braces are missing from
// this chunk; only the version-dispatch skeleton is visible.
57 void AArch64TargetInfo::setArchFeatures() {
58 if (*ArchInfo
== llvm::AArch64::ARMV8R
) {
68 } else if (ArchInfo
->Version
.getMajor() == 8) {
69 if (ArchInfo
->Version
.getMinor() >= 7u) {
72 if (ArchInfo
->Version
.getMinor() >= 6u) {
76 if (ArchInfo
->Version
.getMinor() >= 5u) {
// v8.5+ implies FLAGM2 (alternative NZCV condition-flag handling).
77 HasAlternativeNZCV
= true;
84 if (ArchInfo
->Version
.getMinor() >= 4u) {
89 if (ArchInfo
->Version
.getMinor() >= 3u) {
93 if (ArchInfo
->Version
.getMinor() >= 2u) {
96 if (ArchInfo
->Version
.getMinor() >= 1u) {
101 } else if (ArchInfo
->Version
.getMajor() == 9) {
102 if (ArchInfo
->Version
.getMinor() >= 2u) {
105 if (ArchInfo
->Version
.getMinor() >= 1u) {
// All v9.x profiles get FLAGM2 as well.
112 HasAlternativeNZCV
= true;
// Constructor: establishes the baseline AArch64 type layout, ABI ("aapcs"),
// atomic widths, long-double format, and per-OS type tweaks.
// NOTE(review): several else-branches and statements are elided in this
// chunk (e.g. the non-OpenBSD Int64Type path appears without its "else").
130 AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple
&Triple
,
131 const TargetOptions
&Opts
)
132 : TargetInfo(Triple
), ABI("aapcs") {
// OpenBSD uses "long long" for int64_t/intmax_t; other OSes use "long".
133 if (getTriple().isOSOpenBSD()) {
134 Int64Type
= SignedLongLong
;
135 IntMaxType
= SignedLongLong
;
137 if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
138 WCharType
= UnsignedInt
;
140 Int64Type
= SignedLong
;
141 IntMaxType
= SignedLong
;
144 // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
145 HasLegalHalfType
= true;
146 HalfArgsAndReturns
= true;
// Pointer/long width depends on LP64 vs ILP32 (arm64_32-style) triples.
150 if (Triple
.isArch64Bit())
151 LongWidth
= LongAlign
= PointerWidth
= PointerAlign
= 64;
153 LongWidth
= LongAlign
= PointerWidth
= PointerAlign
= 32;
155 MaxVectorAlign
= 128;
156 MaxAtomicInlineWidth
= 128;
157 MaxAtomicPromoteWidth
= 128;
// long double is IEEE quad (128-bit) on AAPCS targets.
159 LongDoubleWidth
= LongDoubleAlign
= SuitableAlign
= 128;
160 LongDoubleFormat
= &llvm::APFloat::IEEEquad();
162 BFloat16Width
= BFloat16Align
= 16;
163 BFloat16Format
= &llvm::APFloat::BFloat();
165 // Make __builtin_ms_va_list available.
166 HasBuiltinMSVaList
= true;
168 // Make the SVE types available. Note that this deliberately doesn't
169 // depend on SveMode, since in principle it should be possible to turn
170 // SVE on and off within a translation unit. It should also be possible
171 // to compile the global declaration:
176 HasAArch64SVETypes
= true;
178 // {} in inline assembly are neon specifiers, not assembly variant
180 NoAsmVariants
= true;
182 // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
183 // contributes to the alignment of the containing aggregate in the same way
184 // a plain (non bit-field) member of that type would, without exception for
185 // zero-sized or anonymous bit-fields."
186 assert(UseBitFieldTypeAlignment
&& "bitfields affect type alignment");
187 UseZeroLengthBitfieldAlignment
= true;
189 // AArch64 targets default to using the ARM C++ ABI.
190 TheCXXABI
.set(TargetCXXABI::GenericAArch64
);
// Profiling symbol name differs per OS/EABI convention.
192 if (Triple
.getOS() == llvm::Triple::Linux
)
193 this->MCountName
= "\01_mcount";
194 else if (Triple
.getOS() == llvm::Triple::UnknownOS
)
196 Opts
.EABIVersion
== llvm::EABI::GNU
? "\01_mcount" : "mcount";
199 StringRef
AArch64TargetInfo::getABI() const { return ABI
; }
// Accept only the ABIs this target supports: "aapcs" and "darwinpcs".
// NOTE(review): the success/failure returns and the assignment to ABI are
// elided in this chunk; only the validity check is visible.
201 bool AArch64TargetInfo::setABI(const std::string
&Name
) {
202 if (Name
!= "aapcs" && Name
!= "darwinpcs")
// Parse a -mbranch-protection= specification string and fill in BPI with the
// resulting return-address-signing scope/key and BTI setting. On parse
// failure, Err is set by the ARM parser and the function reports failure.
// NOTE(review): the early-return after the parse failure check and the
// assignment of the StringSwitch result are elided in this chunk.
209 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec
, StringRef
,
210 BranchProtectionInfo
&BPI
,
211 StringRef
&Err
) const {
212 llvm::ARM::ParsedBranchProtection PBP
;
213 if (!llvm::ARM::parseBranchProtection(Spec
, PBP
, Err
))
// Map the parsed scope string onto the LangOptions enum.
217 llvm::StringSwitch
<LangOptions::SignReturnAddressScopeKind
>(PBP
.Scope
)
218 .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf
)
219 .Case("all", LangOptions::SignReturnAddressScopeKind::All
)
220 .Default(LangOptions::SignReturnAddressScopeKind::None
);
// "a_key" selects the A key; otherwise the B key is used.
222 if (PBP
.Key
== "a_key")
223 BPI
.SignKey
= LangOptions::SignReturnAddressKeyKind::AKey
;
225 BPI
.SignKey
= LangOptions::SignReturnAddressKeyKind::BKey
;
227 BPI
.BranchTargetEnforcement
= PBP
.BranchTargetEnforcement
;
// A CPU name is valid if it is the pseudo-CPU "generic" or is known to the
// LLVM AArch64 target parser.
231 bool AArch64TargetInfo::isValidCPUName(StringRef Name
) const {
232 return Name
== "generic" || llvm::AArch64::parseCpu(Name
);
// setCPU only validates the name; feature selection happens later in
// initFeatureMap/handleTargetFeatures.
235 bool AArch64TargetInfo::setCPU(const std::string
&Name
) {
236 return isValidCPUName(Name
);
// Populate Values with every CPU/arch name the AArch64 target parser accepts
// (used for diagnostics such as "did you mean ...").
239 void AArch64TargetInfo::fillValidCPUList(
240 SmallVectorImpl
<StringRef
> &Values
) const {
241 llvm::AArch64::fillValidCPUArchList(Values
);
// Predefined macros added for Armv8.1-A and later (base of the chain that
// each later getTargetDefinesARMV8xA helper calls into).
244 void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions
&Opts
,
245 MacroBuilder
&Builder
) const {
246 Builder
.defineMacro("__ARM_FEATURE_QRDMX", "1");
// Predefined macros for Armv8.2-A; chains to the v8.1 defines.
249 void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions
&Opts
,
250 MacroBuilder
&Builder
) const {
251 // Also include the ARMv8.1 defines
252 getTargetDefinesARMV81A(Opts
, Builder
);
// Predefined macros for Armv8.3-A (complex numbers, JS conversion, pointer
// authentication); chains to the v8.2 defines.
255 void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions
&Opts
,
256 MacroBuilder
&Builder
) const {
257 Builder
.defineMacro("__ARM_FEATURE_COMPLEX", "1");
258 Builder
.defineMacro("__ARM_FEATURE_JCVT", "1");
259 Builder
.defineMacro("__ARM_FEATURE_PAUTH", "1");
260 // Also include the Armv8.2 defines
261 getTargetDefinesARMV82A(Opts
, Builder
);
// Predefined macros for Armv8.4-A; adds nothing of its own here, just
// chains to the v8.3 defines.
264 void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions
&Opts
,
265 MacroBuilder
&Builder
) const {
266 // Also include the Armv8.3 defines
267 getTargetDefinesARMV83A(Opts
, Builder
);
// Predefined macros for Armv8.5-A (FRINT rounding intrinsics, BTI);
// chains to the v8.4 defines.
270 void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions
&Opts
,
271 MacroBuilder
&Builder
) const {
272 Builder
.defineMacro("__ARM_FEATURE_FRINT", "1");
273 Builder
.defineMacro("__ARM_FEATURE_BTI", "1");
274 // Also include the Armv8.4 defines
275 getTargetDefinesARMV84A(Opts
, Builder
);
// Predefined macros for Armv8.6-A; chains to the v8.5 defines.
278 void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions
&Opts
,
279 MacroBuilder
&Builder
) const {
280 // Also include the Armv8.5 defines
281 // FIXME: Armv8.6 makes the following extensions mandatory:
282 // - __ARM_FEATURE_BF16
283 // - __ARM_FEATURE_MATMUL_INT8
285 getTargetDefinesARMV85A(Opts
, Builder
);
// Predefined macros for Armv8.7-A; chains to the v8.6 defines.
288 void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions
&Opts
,
289 MacroBuilder
&Builder
) const {
290 // Also include the Armv8.6 defines
291 getTargetDefinesARMV86A(Opts
, Builder
);
// Predefined macros for Armv8.8-A; chains to the v8.7 defines.
294 void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions
&Opts
,
295 MacroBuilder
&Builder
) const {
296 // Also include the Armv8.7 defines
297 getTargetDefinesARMV87A(Opts
, Builder
);
// Predefined macros for Armv8.9-A; chains to the v8.8 defines.
300 void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions
&Opts
,
301 MacroBuilder
&Builder
) const {
302 // Also include the Armv8.8 defines
303 getTargetDefinesARMV88A(Opts
, Builder
);
// Armv9-A inherits its feature-macro set from Armv8.5-A.
306 void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions
&Opts
,
307 MacroBuilder
&Builder
) const {
308 // Armv9-A maps to Armv8.5-A
309 getTargetDefinesARMV85A(Opts
, Builder
);
// Armv9.1-A inherits its feature-macro set from Armv8.6-A.
312 void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions
&Opts
,
313 MacroBuilder
&Builder
) const {
314 // Armv9.1-A maps to Armv8.6-A
315 getTargetDefinesARMV86A(Opts
, Builder
);
// Armv9.2-A inherits its feature-macro set from Armv8.7-A.
318 void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions
&Opts
,
319 MacroBuilder
&Builder
) const {
320 // Armv9.2-A maps to Armv8.7-A
321 getTargetDefinesARMV87A(Opts
, Builder
);
// Armv9.3-A inherits its feature-macro set from Armv8.8-A.
324 void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions
&Opts
,
325 MacroBuilder
&Builder
) const {
326 // Armv9.3-A maps to Armv8.8-A
327 getTargetDefinesARMV88A(Opts
, Builder
);
// Armv9.4-A inherits its feature-macro set from Armv8.9-A.
330 void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions
&Opts
,
331 MacroBuilder
&Builder
) const {
332 // Armv9.4-A maps to Armv8.9-A
333 getTargetDefinesARMV89A(Opts
, Builder
);
// Emit all AArch64 predefined macros: target identification, code model,
// ACLE baseline macros, then feature-conditional macros (NEON/SVE/SVE2,
// crypto, FP16, BF16, MTE, pointer auth, etc.), then the per-architecture
// macro chain, and finally the __sync/__FP_FAST macros.
// NOTE(review): many of the "if (HasX)" guard lines are elided in this
// chunk, so several defineMacro calls appear without their visible guard.
336 void AArch64TargetInfo::getTargetDefines(const LangOptions
&Opts
,
337 MacroBuilder
&Builder
) const {
338 // Target identification.
339 Builder
.defineMacro("__aarch64__");
340 // Inline assembly supports AArch64 flag outputs.
341 Builder
.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
// Reflect the code model in a __AARCH64_CMODEL_*__ macro.
343 std::string CodeModel
= getTargetOpts().CodeModel
;
344 if (CodeModel
== "default")
346 for (char &c
: CodeModel
)
348 Builder
.defineMacro("__AARCH64_CMODEL_" + CodeModel
+ "__");
350 // ACLE predefines. Many can only have one possible value on v8 AArch64.
351 Builder
.defineMacro("__ARM_ACLE", "200");
352 Builder
.defineMacro("__ARM_ARCH",
353 std::to_string(ArchInfo
->Version
.getMajor()));
354 Builder
.defineMacro("__ARM_ARCH_PROFILE",
355 std::string("'") + (char)ArchInfo
->Profile
+ "'");
357 Builder
.defineMacro("__ARM_64BIT_STATE", "1");
358 Builder
.defineMacro("__ARM_PCS_AAPCS64", "1");
359 Builder
.defineMacro("__ARM_ARCH_ISA_A64", "1");
361 Builder
.defineMacro("__ARM_FEATURE_CLZ", "1");
362 Builder
.defineMacro("__ARM_FEATURE_FMA", "1");
363 Builder
.defineMacro("__ARM_FEATURE_LDREX", "0xF");
364 Builder
.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
365 Builder
.defineMacro("__ARM_FEATURE_DIV"); // For backwards compatibility
366 Builder
.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
367 Builder
.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
369 Builder
.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
371 // 0xe implies support for half, single and double precision operations.
373 Builder
.defineMacro("__ARM_FP", "0xE");
375 // PCS specifies this for SysV variants, which is all we support. Other ABIs
376 // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
377 Builder
.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
378 Builder
.defineMacro("__ARM_FP16_ARGS", "1");
380 if (Opts
.UnsafeFPMath
)
381 Builder
.defineMacro("__ARM_FP_FAST", "1");
// -fshort-wchar / -fshort-enums are reflected in ACLE sizeof macros.
383 Builder
.defineMacro("__ARM_SIZEOF_WCHAR_T",
384 Twine(Opts
.WCharSize
? Opts
.WCharSize
: 4));
386 Builder
.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts
.ShortEnums
? "1" : "4");
// NEON / SVE feature macros.
388 if (FPU
& NeonMode
) {
389 Builder
.defineMacro("__ARM_NEON", "1");
390 // 64-bit NEON supports half, single and double precision operations.
391 Builder
.defineMacro("__ARM_NEON_FP", "0xE");
395 Builder
.defineMacro("__ARM_FEATURE_SVE", "1");
397 if ((FPU
& NeonMode
) && (FPU
& SveMode
))
398 Builder
.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
401 Builder
.defineMacro("__ARM_FEATURE_SVE2", "1");
403 if (HasSVE2
&& HasSVE2AES
)
404 Builder
.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
406 if (HasSVE2
&& HasSVE2BitPerm
)
407 Builder
.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
409 if (HasSVE2
&& HasSVE2SHA3
)
410 Builder
.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
412 if (HasSVE2
&& HasSVE2SM4
)
413 Builder
.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
416 Builder
.defineMacro("__ARM_FEATURE_CRC32", "1");
419 Builder
.defineMacro("__ARM_FEATURE_RCPC", "1");
422 Builder
.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
424 // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
425 // macros for AES, SHA2, SHA3 and SM4
426 if (HasAES
&& HasSHA2
)
427 Builder
.defineMacro("__ARM_FEATURE_CRYPTO", "1");
430 Builder
.defineMacro("__ARM_FEATURE_AES", "1");
433 Builder
.defineMacro("__ARM_FEATURE_SHA2", "1");
436 Builder
.defineMacro("__ARM_FEATURE_SHA3", "1");
437 Builder
.defineMacro("__ARM_FEATURE_SHA512", "1");
441 Builder
.defineMacro("__ARM_FEATURE_SM3", "1");
442 Builder
.defineMacro("__ARM_FEATURE_SM4", "1");
446 Builder
.defineMacro("__ARM_FEATURE_PAUTH", "1");
449 Builder
.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
451 if ((FPU
& NeonMode
) && HasFullFP16
)
452 Builder
.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
454 Builder
.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
457 Builder
.defineMacro("__ARM_FEATURE_DOTPROD", "1");
460 Builder
.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
463 Builder
.defineMacro("__ARM_FEATURE_TME", "1");
466 Builder
.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
469 Builder
.defineMacro("__ARM_FEATURE_ATOMICS", "1");
472 Builder
.defineMacro("__ARM_FEATURE_BF16", "1");
473 Builder
.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
474 Builder
.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
475 Builder
.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
478 if ((FPU
& SveMode
) && HasBFloat16
) {
479 Builder
.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
482 if ((FPU
& SveMode
) && HasMatmulFP64
)
483 Builder
.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
485 if ((FPU
& SveMode
) && HasMatmulFP32
)
486 Builder
.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
488 if ((FPU
& SveMode
) && HasMatMul
)
489 Builder
.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
491 if ((FPU
& NeonMode
) && HasFP16FML
)
492 Builder
.defineMacro("__ARM_FEATURE_FP16_FML", "1");
// Return-address signing: encode key and scope into a bitmask macro.
494 if (Opts
.hasSignReturnAddress()) {
496 // 0: Protection using the A key
497 // 1: Protection using the B key
498 // 2: Protection including leaf functions
501 if (Opts
.isSignReturnAddressWithAKey())
506 if (Opts
.isSignReturnAddressScopeAll())
509 Builder
.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value
));
512 if (Opts
.BranchTargetEnforcement
)
513 Builder
.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
516 Builder
.defineMacro("__ARM_FEATURE_LS64", "1");
519 Builder
.defineMacro("__ARM_FEATURE_RNG", "1");
522 Builder
.defineMacro("__ARM_FEATURE_MOPS", "1");
525 Builder
.defineMacro("__ARM_FEATURE_SYSREG128", "1");
// Dispatch to the per-architecture macro chain.
527 if (*ArchInfo
== llvm::AArch64::ARMV8_1A
)
528 getTargetDefinesARMV81A(Opts
, Builder
);
529 else if (*ArchInfo
== llvm::AArch64::ARMV8_2A
)
530 getTargetDefinesARMV82A(Opts
, Builder
);
531 else if (*ArchInfo
== llvm::AArch64::ARMV8_3A
)
532 getTargetDefinesARMV83A(Opts
, Builder
);
533 else if (*ArchInfo
== llvm::AArch64::ARMV8_4A
)
534 getTargetDefinesARMV84A(Opts
, Builder
);
535 else if (*ArchInfo
== llvm::AArch64::ARMV8_5A
)
536 getTargetDefinesARMV85A(Opts
, Builder
);
537 else if (*ArchInfo
== llvm::AArch64::ARMV8_6A
)
538 getTargetDefinesARMV86A(Opts
, Builder
);
539 else if (*ArchInfo
== llvm::AArch64::ARMV8_7A
)
540 getTargetDefinesARMV87A(Opts
, Builder
);
541 else if (*ArchInfo
== llvm::AArch64::ARMV8_8A
)
542 getTargetDefinesARMV88A(Opts
, Builder
);
543 else if (*ArchInfo
== llvm::AArch64::ARMV8_9A
)
544 getTargetDefinesARMV89A(Opts
, Builder
);
545 else if (*ArchInfo
== llvm::AArch64::ARMV9A
)
546 getTargetDefinesARMV9A(Opts
, Builder
);
547 else if (*ArchInfo
== llvm::AArch64::ARMV9_1A
)
548 getTargetDefinesARMV91A(Opts
, Builder
);
549 else if (*ArchInfo
== llvm::AArch64::ARMV9_2A
)
550 getTargetDefinesARMV92A(Opts
, Builder
);
551 else if (*ArchInfo
== llvm::AArch64::ARMV9_3A
)
552 getTargetDefinesARMV93A(Opts
, Builder
);
553 else if (*ArchInfo
== llvm::AArch64::ARMV9_4A
)
554 getTargetDefinesARMV94A(Opts
, Builder
);
556 // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
557 Builder
.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
558 Builder
.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
559 Builder
.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
560 Builder
.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
562 // Allow detection of fast FMA support.
563 Builder
.defineMacro("__FP_FAST_FMA", "1");
564 Builder
.defineMacro("__FP_FAST_FMAF", "1");
566 // C/C++ operators work on both VLS and VLA SVE types
568 Builder
.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
// Fixed vscale (e.g. -msve-vector-bits=512) exposes __ARM_FEATURE_SVE_BITS.
570 if (Opts
.VScaleMin
&& Opts
.VScaleMin
== Opts
.VScaleMax
) {
571 Builder
.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts
.VScaleMin
* 128));
// Expose the BuiltinInfo table above; the count is the span of AArch64
// target-specific builtin IDs.
575 ArrayRef
<Builtin::Info
> AArch64TargetInfo::getTargetBuiltins() const {
576 return llvm::ArrayRef(BuiltinInfo
, clang::AArch64::LastTSBuiltin
-
577 Builtin::FirstTSBuiltin
);
// Report the (min, max) vscale range: honor explicit -mvscale-{min,max}
// options first; otherwise SVE targets get the architectural 1..16 range.
// NOTE(review): the trailing "return std::nullopt" path is elided here.
580 std::optional
<std::pair
<unsigned, unsigned>>
581 AArch64TargetInfo::getVScaleRange(const LangOptions
&LangOpts
) const {
582 if (LangOpts
.VScaleMin
|| LangOpts
.VScaleMax
)
583 return std::pair
<unsigned, unsigned>(
584 LangOpts
.VScaleMin
? LangOpts
.VScaleMin
: 1, LangOpts
.VScaleMax
);
586 if (hasFeature("sve"))
587 return std::pair
<unsigned, unsigned>(1, 16);
// Function-multi-versioning sort priority: look the feature up in the
// AArch64 extension table and return its FMV priority ("default" is
// special-cased; that branch's return value is elided in this chunk).
592 unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name
) const {
593 if (Name
== "default")
595 for (const auto &E
: llvm::AArch64::Extensions
)
597 return E
.FmvPriority
;
601 unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
602 // Take the maximum priority as per feature cost, so more features win.
603 return llvm::AArch64::ExtensionInfo::MaxFMVPriority
;
// A feature affects codegen if the extension table lists dependent
// (backend) features for it.
606 bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name
) const {
607 auto F
= llvm::find_if(llvm::AArch64::Extensions
, [&](const auto &E
) {
608 return Name
== E
.Name
&& !E
.DependentFeatures
.empty();
610 return F
!= std::end(llvm::AArch64::Extensions
);
// Return the comma-separated dependent-feature list for a named extension,
// or an empty string when the extension is unknown (the "else" arm of the
// trailing conditional is elided in this chunk).
613 StringRef
AArch64TargetInfo::getFeatureDependencies(StringRef Name
) const {
614 auto F
= llvm::find_if(llvm::AArch64::Extensions
,
615 [&](const auto &E
) { return Name
== E
.Name
; });
616 return F
!= std::end(llvm::AArch64::Extensions
) ? F
->DependentFeatures
// __builtin_cpu_supports argument is valid if it names a known extension
// (the return statements inside and after the loop are elided here).
620 bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr
) const {
621 for (const auto &E
: llvm::AArch64::Extensions
)
622 if (FeatureStr
== E
.Name
)
// Map a feature name (as used by __has_feature / target attributes) onto the
// corresponding Has* member flag or FPU mode bit via a StringSwitch.
// NOTE(review): several cases and the .Default(...) terminator are elided
// in this chunk.
627 bool AArch64TargetInfo::hasFeature(StringRef Feature
) const {
628 return llvm::StringSwitch
<bool>(Feature
)
629 .Cases("aarch64", "arm64", "arm", true)
631 .Cases("neon", "fp", "simd", FPU
& NeonMode
)
632 .Case("jscvt", HasJSCVT
)
633 .Case("fcma", HasFCMA
)
634 .Case("rng", HasRandGen
)
635 .Case("flagm", HasFlagM
)
636 .Case("flagm2", HasAlternativeNZCV
)
637 .Case("fp16fml", HasFP16FML
)
638 .Case("dotprod", HasDotProd
)
643 .Case("sha2", HasSHA2
)
644 .Case("sha3", HasSHA3
)
645 .Cases("aes", "pmull", HasAES
)
646 .Cases("fp16", "fullfp16", HasFullFP16
)
648 .Case("dpb", HasCCPP
)
649 .Case("dpb2", HasCCDP
)
650 .Case("rcpc", HasRCPC
)
651 .Case("frintts", HasFRInt3264
)
652 .Case("i8mm", HasMatMul
)
653 .Case("bf16", HasBFloat16
)
654 .Case("sve", FPU
& SveMode
)
655 .Case("sve-bf16", FPU
& SveMode
&& HasBFloat16
)
656 .Case("sve-i8mm", FPU
& SveMode
&& HasMatMul
)
657 .Case("f32mm", FPU
& SveMode
&& HasMatmulFP32
)
658 .Case("f64mm", FPU
& SveMode
&& HasMatmulFP64
)
659 .Case("sve2", FPU
& SveMode
&& HasSVE2
)
660 .Case("sve2-pmull128", FPU
& SveMode
&& HasSVE2AES
)
661 .Case("sve2-bitperm", FPU
& SveMode
&& HasSVE2BitPerm
)
662 .Case("sve2-sha3", FPU
& SveMode
&& HasSVE2SHA3
)
663 .Case("sve2-sm4", FPU
& SveMode
&& HasSVE2SM4
)
665 .Case("sme-f64f64", HasSMEF64F64
)
666 .Case("sme-i16i64", HasSMEI16I64
)
667 .Cases("memtag", "memtag2", HasMTE
)
669 .Case("predres", HasPredRes
)
670 .Cases("ssbs", "ssbs2", HasSSBS
)
672 .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64
)
673 .Case("wfxt", HasWFxT
)
// Toggle a feature in the map. If the name is an architecture sub-arch
// (e.g. "v8.2a"), also enable every implied earlier architecture and the
// architecture's default extension features.
// NOTE(review): the guard around the early "return" and the
// enabled/disabled distinction lines are partly elided in this chunk.
677 void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap
<bool> &Features
,
678 StringRef Name
, bool Enabled
) const {
679 Features
[Name
] = Enabled
;
680 // If the feature is an architecture feature (like v8.2a), add all previous
681 // architecture versions and any dependant target features.
682 const std::optional
<llvm::AArch64::ArchInfo
> ArchInfo
=
683 llvm::AArch64::ArchInfo::findBySubArch(Name
);
686 return; // Not an architecture, nothing more to do.
688 // Disabling an architecture feature does not affect dependent features
// Enable every architecture this one implies (transitively).
692 for (const auto *OtherArch
: llvm::AArch64::ArchInfos
)
693 if (ArchInfo
->implies(*OtherArch
))
694 Features
[OtherArch
->getSubArch()] = true;
696 // Set any features implied by the architecture
697 std::vector
<StringRef
> CPUFeats
;
698 if (llvm::AArch64::getExtensionFeatures(ArchInfo
->DefaultExts
, CPUFeats
)) {
699 for (auto F
: CPUFeats
) {
700 assert(F
[0] == '+' && "Expected + in target feature!");
701 Features
[F
.drop_front(1)] = true;
// Translate the final +/-feature strings into the Has* member flags and pick
// the newest architecture version mentioned as ArchInfo. A second pass at
// the end re-applies explicit "-feature" disables so they win over
// architecture-implied enables.
// NOTE(review): most of the flag assignments inside the "if (Feature == ...)"
// bodies are elided in this chunk; only the comparisons are visible.
706 bool AArch64TargetInfo::handleTargetFeatures(std::vector
<std::string
> &Features
,
707 DiagnosticsEngine
&Diags
) {
708 for (const auto &Feature
: Features
) {
709 if (Feature
== "-fp-armv8")
711 if (Feature
== "-neon")
713 if (Feature
== "-sve")
716 if (Feature
== "+neon" || Feature
== "+fp-armv8")
718 if (Feature
== "+jscvt") {
722 if (Feature
== "+fcma") {
727 if (Feature
== "+sve") {
732 if (Feature
== "+sve2") {
738 if (Feature
== "+sve2-aes") {
745 if (Feature
== "+sve2-sha3") {
752 if (Feature
== "+sve2-sm4") {
759 if (Feature
== "+sve2-bitperm") {
764 HasSVE2BitPerm
= true;
766 if (Feature
== "+f32mm") {
770 HasMatmulFP32
= true;
772 if (Feature
== "+f64mm") {
776 HasMatmulFP64
= true;
778 if (Feature
== "+sme") {
783 if (Feature
== "+sme-f64f64") {
789 if (Feature
== "+sme-i16i64") {
795 if (Feature
== "+sb")
797 if (Feature
== "+predres")
799 if (Feature
== "+ssbs")
801 if (Feature
== "+bti")
803 if (Feature
== "+wfxt")
805 if (Feature
== "-fmv")
807 if (Feature
== "+crc")
809 if (Feature
== "+rcpc")
811 if (Feature
== "+aes") {
815 if (Feature
== "+sha2") {
819 if (Feature
== "+sha3") {
824 if (Feature
== "+rdm") {
828 if (Feature
== "+dit")
830 if (Feature
== "+cccp")
832 if (Feature
== "+ccdp") {
836 if (Feature
== "+fptoint")
838 if (Feature
== "+sm4") {
842 if (Feature
== "+strict-align")
843 HasUnaligned
= false;
844 // All predecessor archs are added but select the latest one for ArchKind.
845 if (Feature
== "+v8a" && ArchInfo
->Version
< llvm::AArch64::ARMV8A
.Version
)
846 ArchInfo
= &llvm::AArch64::ARMV8A
;
847 if (Feature
== "+v8.1a" &&
848 ArchInfo
->Version
< llvm::AArch64::ARMV8_1A
.Version
)
849 ArchInfo
= &llvm::AArch64::ARMV8_1A
;
850 if (Feature
== "+v8.2a" &&
851 ArchInfo
->Version
< llvm::AArch64::ARMV8_2A
.Version
)
852 ArchInfo
= &llvm::AArch64::ARMV8_2A
;
853 if (Feature
== "+v8.3a" &&
854 ArchInfo
->Version
< llvm::AArch64::ARMV8_3A
.Version
)
855 ArchInfo
= &llvm::AArch64::ARMV8_3A
;
856 if (Feature
== "+v8.4a" &&
857 ArchInfo
->Version
< llvm::AArch64::ARMV8_4A
.Version
)
858 ArchInfo
= &llvm::AArch64::ARMV8_4A
;
859 if (Feature
== "+v8.5a" &&
860 ArchInfo
->Version
< llvm::AArch64::ARMV8_5A
.Version
)
861 ArchInfo
= &llvm::AArch64::ARMV8_5A
;
862 if (Feature
== "+v8.6a" &&
863 ArchInfo
->Version
< llvm::AArch64::ARMV8_6A
.Version
)
864 ArchInfo
= &llvm::AArch64::ARMV8_6A
;
865 if (Feature
== "+v8.7a" &&
866 ArchInfo
->Version
< llvm::AArch64::ARMV8_7A
.Version
)
867 ArchInfo
= &llvm::AArch64::ARMV8_7A
;
868 if (Feature
== "+v8.8a" &&
869 ArchInfo
->Version
< llvm::AArch64::ARMV8_8A
.Version
)
870 ArchInfo
= &llvm::AArch64::ARMV8_8A
;
871 if (Feature
== "+v8.9a" &&
872 ArchInfo
->Version
< llvm::AArch64::ARMV8_9A
.Version
)
873 ArchInfo
= &llvm::AArch64::ARMV8_9A
;
874 if (Feature
== "+v9a" && ArchInfo
->Version
< llvm::AArch64::ARMV9A
.Version
)
875 ArchInfo
= &llvm::AArch64::ARMV9A
;
876 if (Feature
== "+v9.1a" &&
877 ArchInfo
->Version
< llvm::AArch64::ARMV9_1A
.Version
)
878 ArchInfo
= &llvm::AArch64::ARMV9_1A
;
879 if (Feature
== "+v9.2a" &&
880 ArchInfo
->Version
< llvm::AArch64::ARMV9_2A
.Version
)
881 ArchInfo
= &llvm::AArch64::ARMV9_2A
;
882 if (Feature
== "+v9.3a" &&
883 ArchInfo
->Version
< llvm::AArch64::ARMV9_3A
.Version
)
884 ArchInfo
= &llvm::AArch64::ARMV9_3A
;
885 if (Feature
== "+v9.4a" &&
886 ArchInfo
->Version
< llvm::AArch64::ARMV9_4A
.Version
)
887 ArchInfo
= &llvm::AArch64::ARMV9_4A
;
888 if (Feature
== "+v8r")
889 ArchInfo
= &llvm::AArch64::ARMV8R
;
890 if (Feature
== "+fullfp16") {
894 if (Feature
== "+dotprod") {
898 if (Feature
== "+fp16fml") {
903 if (Feature
== "+mte")
905 if (Feature
== "+tme")
907 if (Feature
== "+pauth")
909 if (Feature
== "+i8mm")
911 if (Feature
== "+bf16")
913 if (Feature
== "+lse")
915 if (Feature
== "+ls64")
917 if (Feature
== "+rand")
919 if (Feature
== "+flagm")
921 if (Feature
== "+altnzcv") {
923 HasAlternativeNZCV
= true;
925 if (Feature
== "+mops")
927 if (Feature
== "+d128")
929 if (Feature
== "+gcs")
933 // Check features that are manually disabled by command line options.
934 // This needs to be checked after architecture-related features are handled,
935 // making sure they are properly disabled when required.
936 for (const auto &Feature
: Features
) {
937 if (Feature
== "-d128")
// Build the final feature map: start from the CPU's implied extensions,
// then add dependent '+' features for FMV/codegen-affecting entries, then
// apply the explicit +/- features (which may disable earlier additions),
// and delegate to the base TargetInfo implementation.
// NOTE(review): the guard around CpuInfo's validity and a few braces are
// elided in this chunk.
959 bool AArch64TargetInfo::initFeatureMap(
960 llvm::StringMap
<bool> &Features
, DiagnosticsEngine
&Diags
, StringRef CPU
,
961 const std::vector
<std::string
> &FeaturesVec
) const {
962 std::vector
<std::string
> UpdatedFeaturesVec
;
963 // Parse the CPU and add any implied features.
964 std::optional
<llvm::AArch64::CpuInfo
> CpuInfo
= llvm::AArch64::parseCpu(CPU
);
966 uint64_t Exts
= CpuInfo
->getImpliedExtensions();
967 std::vector
<StringRef
> CPUFeats
;
968 llvm::AArch64::getExtensionFeatures(Exts
, CPUFeats
);
969 for (auto F
: CPUFeats
) {
970 assert((F
[0] == '+' || F
[0] == '-') && "Expected +/- in target feature!");
971 UpdatedFeaturesVec
.push_back(F
.str());
975 // Process target and dependent features. This is done in two loops collecting
976 // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
977 // add target '+/-'features that can later disable some of features added on
978 // the first loop. Function Multi Versioning features begin with '?'.
979 for (const auto &Feature
: FeaturesVec
)
980 if (((Feature
[0] == '?' || Feature
[0] == '+')) &&
981 AArch64TargetInfo::doesFeatureAffectCodeGen(Feature
.substr(1))) {
982 StringRef DepFeatures
=
983 AArch64TargetInfo::getFeatureDependencies(Feature
.substr(1));
984 SmallVector
<StringRef
, 1> AttrFeatures
;
985 DepFeatures
.split(AttrFeatures
, ",");
986 for (auto F
: AttrFeatures
)
987 UpdatedFeaturesVec
.push_back(F
.str());
989 for (const auto &Feature
: FeaturesVec
)
990 if (Feature
[0] != '?') {
991 std::string UpdatedFeature
= Feature
;
992 if (Feature
[0] == '+') {
// Canonicalize user-facing extension names to backend feature names.
993 std::optional
<llvm::AArch64::ExtensionInfo
> Extension
=
994 llvm::AArch64::parseArchExtension(Feature
.substr(1));
996 UpdatedFeature
= Extension
->Feature
.str();
998 UpdatedFeaturesVec
.push_back(UpdatedFeature
);
1001 return TargetInfo::initFeatureMap(Features
, Diags
, CPU
, UpdatedFeaturesVec
);
1004 // Parse AArch64 Target attributes, which are a comma separated list of:
1005 //  "arch=<arch>" - parsed to features as per -march=..
1006 //  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
1007 //  "tune=<cpu>" - TuneCPU set to <cpu>
1008 //  "feature", "no-feature" - Add (or remove) feature.
1009 //  "+feature", "+nofeature" - Add (or remove) feature.
// NOTE(review): several early returns, "if" guards and closing braces of
// this function are elided in this chunk; the overall shape is a trim/split
// loop over comma-separated clauses with a helper for "+feat" suffixes.
1010 ParsedTargetAttr
AArch64TargetInfo::parseTargetAttr(StringRef Features
) const {
1011 ParsedTargetAttr Ret
;
1012 if (Features
== "default")
1014 SmallVector
<StringRef
, 1> AttrFeatures
;
1015 Features
.split(AttrFeatures
, ",");
1016 bool FoundArch
= false;
// Helper: split a "feat1+feat2" tail and append each as a +/- backend
// feature, falling back to the raw name so Sema can diagnose it later.
1018 auto SplitAndAddFeatures
= [](StringRef FeatString
,
1019 std::vector
<std::string
> &Features
) {
1020 SmallVector
<StringRef
, 8> SplitFeatures
;
1021 FeatString
.split(SplitFeatures
, StringRef("+"), -1, false);
1022 for (StringRef Feature
: SplitFeatures
) {
1023 StringRef FeatureName
= llvm::AArch64::getArchExtFeature(Feature
);
1024 if (!FeatureName
.empty())
1025 Features
.push_back(FeatureName
.str());
1027 // Pushing the original feature string to give a sema error later on
1028 // when they get checked.
1029 if (Feature
.startswith("no"))
1030 Features
.push_back("-" + Feature
.drop_front(2).str());
1032 Features
.push_back("+" + Feature
.str());
// Main clause loop: arch=, cpu=, tune=, +feat, no-feat, bare feat.
1036 for (auto &Feature
: AttrFeatures
) {
1037 Feature
= Feature
.trim();
1038 if (Feature
.startswith("fpmath="))
1041 if (Feature
.startswith("branch-protection=")) {
1042 Ret
.BranchProtection
= Feature
.split('=').second
.trim();
1046 if (Feature
.startswith("arch=")) {
1048 Ret
.Duplicate
= "arch=";
1050 std::pair
<StringRef
, StringRef
> Split
=
1051 Feature
.split("=").second
.trim().split("+");
1052 const std::optional
<llvm::AArch64::ArchInfo
> AI
=
1053 llvm::AArch64::parseArch(Split
.first
);
1055 // Parse the architecture version, adding the required features to
1059 Ret
.Features
.push_back(AI
->ArchFeature
.str());
1060 // Add any extra features, after the +
1061 SplitAndAddFeatures(Split
.second
, Ret
.Features
);
1062 } else if (Feature
.startswith("cpu=")) {
1063 if (!Ret
.CPU
.empty())
1064 Ret
.Duplicate
= "cpu=";
1066 // Split the cpu string into "cpu=", "cortex-a710" and any remaining
1067 // "+feat" features.
1068 std::pair
<StringRef
, StringRef
> Split
=
1069 Feature
.split("=").second
.trim().split("+");
1070 Ret
.CPU
= Split
.first
;
1071 SplitAndAddFeatures(Split
.second
, Ret
.Features
);
1073 } else if (Feature
.startswith("tune=")) {
1074 if (!Ret
.Tune
.empty())
1075 Ret
.Duplicate
= "tune=";
1077 Ret
.Tune
= Feature
.split("=").second
.trim();
1078 } else if (Feature
.startswith("+")) {
1079 SplitAndAddFeatures(Feature
, Ret
.Features
);
1080 } else if (Feature
.startswith("no-")) {
1081 StringRef FeatureName
=
1082 llvm::AArch64::getArchExtFeature(Feature
.split("-").second
);
1083 if (!FeatureName
.empty())
1084 Ret
.Features
.push_back("-" + FeatureName
.drop_front(1).str());
1086 Ret
.Features
.push_back("-" + Feature
.split("-").second
.str());
1088 // Try parsing the string to the internal target feature name. If it is
1089 // invalid, add the original string (which could already be an internal
1090 // name). These should be checked later by isValidFeatureName.
1091 StringRef FeatureName
= llvm::AArch64::getArchExtFeature(Feature
);
1092 if (!FeatureName
.empty())
1093 Ret
.Features
.push_back(FeatureName
.str());
1095 Ret
.Features
.push_back("+" + Feature
.str());
// Whether __bf16 is available as a type on this target.
// NOTE(review): the body (return statement) and closing brace are elided in
// this chunk.
1101 bool AArch64TargetInfo::hasBFloat16Type() const {
// Report which calling conventions are usable on AArch64; unlisted
// conventions produce a warning result.
// NOTE(review): the switch statement header, the OK-result return for the
// listed cases, and the closing braces are elided in this chunk.
1105 TargetInfo::CallingConvCheckResult
1106 AArch64TargetInfo::checkCallingConvention(CallingConv CC
) const {
1111 case CC_PreserveMost
:
1112 case CC_PreserveAll
:
1113 case CC_OpenCLKernel
:
1114 case CC_AArch64VectorCall
:
1115 case CC_AArch64SVEPCS
:
1119 return CCCR_Warning
;
1123 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
// AArch64 uses its own AAPCS64 va_list ABI rather than char* or void*.
1125 TargetInfo::BuiltinVaListKind
AArch64TargetInfo::getBuiltinVaListKind() const {
1126 return TargetInfo::AArch64ABIBuiltinVaList
;
// Register names accepted in GCC-style inline assembly constraints.
// NOTE(review): the closing "};" of this array is not visible in this chunk.
1129 const char *const AArch64TargetInfo::GCCRegNames
[] = {
1130 // 32-bit Integer registers
1131 "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
1132 "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
1133 "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",
1135 // 64-bit Integer registers
1136 "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
1137 "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
1138 "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",
1140 // 32-bit floating point regsisters
1141 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
1142 "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
1143 "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
1145 // 64-bit floating point regsisters
1146 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
1147 "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
1148 "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
1150 // Neon vector registers
1151 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
1152 "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
1153 "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
1155 // SVE vector registers
1156 "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
1157 "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
1158 "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",
1160 // SVE predicate registers
1161 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
1162 "p11", "p12", "p13", "p14", "p15"
1165 ArrayRef
<const char *> AArch64TargetInfo::getGCCRegNames() const {
1166 return llvm::ArrayRef(GCCRegNames
);
1169 const TargetInfo::GCCRegAlias
AArch64TargetInfo::GCCRegAliases
[] = {
1172 // GCC rN registers are aliases of xN registers.
1202 {{"r29", "x29"}, "fp"},
1203 {{"r30", "x30"}, "lr"},
1204 // The S/D/Q and W/X registers overlap, but aren't really aliases; we
1205 // don't want to substitute one of these for a different-sized one.
1208 ArrayRef
<TargetInfo::GCCRegAlias
> AArch64TargetInfo::getGCCRegAliases() const {
1209 return llvm::ArrayRef(GCCRegAliases
);
1212 // Returns the length of cc constraint.
1213 static unsigned matchAsmCCConstraint(const char *Name
) {
1214 constexpr unsigned len
= 5;
1215 auto RV
= llvm::StringSwitch
<unsigned>(Name
)
1237 AArch64TargetInfo::convertConstraint(const char *&Constraint
) const {
1239 switch (*Constraint
) {
1240 case 'U': // Three-character constraint; add "@3" hint for later parsing.
1241 R
= std::string("@3") + std::string(Constraint
, 3);
1245 if (const unsigned Len
= matchAsmCCConstraint(Constraint
)) {
1246 std::string Converted
= "{" + std::string(Constraint
, Len
) + "}";
1247 Constraint
+= Len
- 1;
1250 return std::string(1, *Constraint
);
1252 R
= TargetInfo::convertConstraint(Constraint
);
1258 bool AArch64TargetInfo::validateAsmConstraint(
1259 const char *&Name
, TargetInfo::ConstraintInfo
&Info
) const {
1263 case 'w': // Floating point and SIMD registers (V0-V31)
1264 Info
.setAllowsRegister();
1266 case 'I': // Constant that can be used with an ADD instruction
1267 case 'J': // Constant that can be used with a SUB instruction
1268 case 'K': // Constant that can be used with a 32-bit logical instruction
1269 case 'L': // Constant that can be used with a 64-bit logical instruction
1270 case 'M': // Constant that can be used as a 32-bit MOV immediate
1271 case 'N': // Constant that can be used as a 64-bit MOV immediate
1272 case 'Y': // Floating point constant zero
1273 case 'Z': // Integer constant zero
1275 case 'Q': // A memory reference with base register and no offset
1276 Info
.setAllowsMemory();
1278 case 'S': // A symbolic address
1279 Info
.setAllowsRegister();
1282 if (Name
[1] == 'p' && (Name
[2] == 'l' || Name
[2] == 'a')) {
1283 // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
1284 Info
.setAllowsRegister();
1288 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
1289 // Utf: A memory address suitable for ldp/stp in TF mode.
1290 // Usa: An absolute symbolic address.
1291 // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
1293 // Better to return an error saying that it's an unrecognised constraint
1294 // even if this is a valid constraint in gcc.
1296 case 'z': // Zero register, wzr or xzr
1297 Info
.setAllowsRegister();
1299 case 'x': // Floating point and SIMD registers (V0-V15)
1300 Info
.setAllowsRegister();
1302 case 'y': // SVE registers (V0-V7)
1303 Info
.setAllowsRegister();
1307 if (const unsigned Len
= matchAsmCCConstraint(Name
)) {
1309 Info
.setAllowsRegister();
1316 bool AArch64TargetInfo::validateConstraintModifier(
1317 StringRef Constraint
, char Modifier
, unsigned Size
,
1318 std::string
&SuggestedModifier
) const {
1319 // Strip off constraint modifiers.
1320 while (Constraint
[0] == '=' || Constraint
[0] == '+' || Constraint
[0] == '&')
1321 Constraint
= Constraint
.substr(1);
1323 switch (Constraint
[0]) {
1331 // For now assume that the person knows what they're
1332 // doing with the modifier.
1335 // By default an 'r' constraint will be in the 'x'
1343 SuggestedModifier
= "w";
1350 std::string_view
AArch64TargetInfo::getClobbers() const { return ""; }
1352 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo
) const {
1360 bool AArch64TargetInfo::hasInt128Type() const { return true; }
1362 AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple
&Triple
,
1363 const TargetOptions
&Opts
)
1364 : AArch64TargetInfo(Triple
, Opts
) {}
1366 void AArch64leTargetInfo::setDataLayout() {
1367 if (getTriple().isOSBinFormatMachO()) {
1368 if(getTriple().isArch32Bit())
1369 resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
1371 resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
1373 resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1376 void AArch64leTargetInfo::getTargetDefines(const LangOptions
&Opts
,
1377 MacroBuilder
&Builder
) const {
1378 Builder
.defineMacro("__AARCH64EL__");
1379 AArch64TargetInfo::getTargetDefines(Opts
, Builder
);
1382 AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple
&Triple
,
1383 const TargetOptions
&Opts
)
1384 : AArch64TargetInfo(Triple
, Opts
) {}
1386 void AArch64beTargetInfo::getTargetDefines(const LangOptions
&Opts
,
1387 MacroBuilder
&Builder
) const {
1388 Builder
.defineMacro("__AARCH64EB__");
1389 Builder
.defineMacro("__AARCH_BIG_ENDIAN");
1390 Builder
.defineMacro("__ARM_BIG_ENDIAN");
1391 AArch64TargetInfo::getTargetDefines(Opts
, Builder
);
1394 void AArch64beTargetInfo::setDataLayout() {
1395 assert(!getTriple().isOSBinFormatMachO());
1396 resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1399 WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple
&Triple
,
1400 const TargetOptions
&Opts
)
1401 : WindowsTargetInfo
<AArch64leTargetInfo
>(Triple
, Opts
), Triple(Triple
) {
1403 // This is an LLP64 platform.
1404 // int:4, long:4, long long:8, long double:8.
1405 IntWidth
= IntAlign
= 32;
1406 LongWidth
= LongAlign
= 32;
1407 DoubleAlign
= LongLongAlign
= 64;
1408 LongDoubleWidth
= LongDoubleAlign
= 64;
1409 LongDoubleFormat
= &llvm::APFloat::IEEEdouble();
1410 IntMaxType
= SignedLongLong
;
1411 Int64Type
= SignedLongLong
;
1412 SizeType
= UnsignedLongLong
;
1413 PtrDiffType
= SignedLongLong
;
1414 IntPtrType
= SignedLongLong
;
1417 void WindowsARM64TargetInfo::setDataLayout() {
1418 resetDataLayout(Triple
.isOSBinFormatMachO()
1419 ? "e-m:o-i64:64-i128:128-n32:64-S128"
1420 : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1421 Triple
.isOSBinFormatMachO() ? "_" : "");
1424 TargetInfo::BuiltinVaListKind
1425 WindowsARM64TargetInfo::getBuiltinVaListKind() const {
1426 return TargetInfo::CharPtrBuiltinVaList
;
1429 TargetInfo::CallingConvCheckResult
1430 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC
) const {
1433 case CC_X86ThisCall
:
1434 case CC_X86FastCall
:
1435 case CC_X86VectorCall
:
1438 case CC_OpenCLKernel
:
1439 case CC_PreserveMost
:
1440 case CC_PreserveAll
:
1446 return CCCR_Warning
;
1450 MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple
&Triple
,
1451 const TargetOptions
&Opts
)
1452 : WindowsARM64TargetInfo(Triple
, Opts
) {
1453 TheCXXABI
.set(TargetCXXABI::Microsoft
);
1456 void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions
&Opts
,
1457 MacroBuilder
&Builder
) const {
1458 WindowsARM64TargetInfo::getTargetDefines(Opts
, Builder
);
1459 Builder
.defineMacro("_M_ARM64", "1");
1462 TargetInfo::CallingConvKind
1463 MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4
) const {
1464 return CCK_MicrosoftWin64
;
1467 unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize
) const {
1468 unsigned Align
= WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize
);
1470 // MSVC does size based alignment for arm64 based on alignment section in
1471 // below document, replicate that to keep alignment consistent with object
1472 // files compiled by MSVC.
1473 // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
1474 if (TypeSize
>= 512) { // TypeSize >= 64 bytes
1475 Align
= std::max(Align
, 128u); // align type at least 16 bytes
1476 } else if (TypeSize
>= 64) { // TypeSize >= 8 bytes
1477 Align
= std::max(Align
, 64u); // align type at least 8 butes
1478 } else if (TypeSize
>= 16) { // TypeSize >= 2 bytes
1479 Align
= std::max(Align
, 32u); // align type at least 4 bytes
1484 MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple
&Triple
,
1485 const TargetOptions
&Opts
)
1486 : WindowsARM64TargetInfo(Triple
, Opts
) {
1487 TheCXXABI
.set(TargetCXXABI::GenericAArch64
);
1490 DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple
&Triple
,
1491 const TargetOptions
&Opts
)
1492 : DarwinTargetInfo
<AArch64leTargetInfo
>(Triple
, Opts
) {
1493 Int64Type
= SignedLongLong
;
1494 if (getTriple().isArch32Bit())
1495 IntMaxType
= SignedLongLong
;
1497 WCharType
= SignedInt
;
1498 UseSignedCharForObjCBool
= false;
1500 LongDoubleWidth
= LongDoubleAlign
= SuitableAlign
= 64;
1501 LongDoubleFormat
= &llvm::APFloat::IEEEdouble();
1503 UseZeroLengthBitfieldAlignment
= false;
1505 if (getTriple().isArch32Bit()) {
1506 UseBitFieldTypeAlignment
= false;
1507 ZeroLengthBitfieldBoundary
= 32;
1508 UseZeroLengthBitfieldAlignment
= true;
1509 TheCXXABI
.set(TargetCXXABI::WatchOS
);
1511 TheCXXABI
.set(TargetCXXABI::AppleARM64
);
1514 void DarwinAArch64TargetInfo::getOSDefines(const LangOptions
&Opts
,
1515 const llvm::Triple
&Triple
,
1516 MacroBuilder
&Builder
) const {
1517 Builder
.defineMacro("__AARCH64_SIMD__");
1518 if (Triple
.isArch32Bit())
1519 Builder
.defineMacro("__ARM64_ARCH_8_32__");
1521 Builder
.defineMacro("__ARM64_ARCH_8__");
1522 Builder
.defineMacro("__ARM_NEON__");
1523 Builder
.defineMacro("__REGISTER_PREFIX__", "");
1524 Builder
.defineMacro("__arm64", "1");
1525 Builder
.defineMacro("__arm64__", "1");
1527 if (Triple
.isArm64e())
1528 Builder
.defineMacro("__arm64e__", "1");
1530 getDarwinDefines(Builder
, Opts
, Triple
, PlatformName
, PlatformMinVersion
);
1533 TargetInfo::BuiltinVaListKind
1534 DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
1535 return TargetInfo::CharPtrBuiltinVaList
;
1538 // 64-bit RenderScript is aarch64
1539 RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple
&Triple
,
1540 const TargetOptions
&Opts
)
1541 : AArch64leTargetInfo(llvm::Triple("aarch64", Triple
.getVendorName(),
1543 Triple
.getEnvironmentName()),
1545 IsRenderScriptTarget
= true;
1548 void RenderScript64TargetInfo::getTargetDefines(const LangOptions
&Opts
,
1549 MacroBuilder
&Builder
) const {
1550 Builder
.defineMacro("__RENDERSCRIPT__");
1551 AArch64leTargetInfo::getTargetDefines(Opts
, Builder
);