// NOTE(review): the three lines below are gitweb page residue (commit title,
// repository path, blob hash) that was fused into the file during extraction;
// preserved as comments so the translation unit stays compilable.
// [clang][modules] Don't prevent translation of FW_Private includes when explicitly...
// llvm-project.git / clang / lib / Basic / Targets / AArch64.cpp
// blob fe5a7af97b7753cdce72406ecf0e6c59021dc7f7
//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements AArch64 TargetInfo objects.
//
//===----------------------------------------------------------------------===//
#include "AArch64.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/ARMTargetParserCommon.h"
#include <optional>
24 using namespace clang;
25 using namespace clang::targets;
27 static constexpr Builtin::Info BuiltinInfo[] = {
28 #define BUILTIN(ID, TYPE, ATTRS) \
29 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
30 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
31 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
32 #include "clang/Basic/BuiltinsNEON.def"
34 #define BUILTIN(ID, TYPE, ATTRS) \
35 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
36 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
37 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
38 #include "clang/Basic/BuiltinsSVE.def"
40 #define BUILTIN(ID, TYPE, ATTRS) \
41 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
42 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
43 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
44 #include "clang/Basic/BuiltinsSME.def"
46 #define BUILTIN(ID, TYPE, ATTRS) \
47 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
48 #define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
49 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
50 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
51 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
52 #define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
53 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
54 #include "clang/Basic/BuiltinsAArch64.def"
57 void AArch64TargetInfo::setArchFeatures() {
58 if (*ArchInfo == llvm::AArch64::ARMV8R) {
59 HasDotProd = true;
60 HasDIT = true;
61 HasFlagM = true;
62 HasRCPC = true;
63 FPU |= NeonMode;
64 HasCCPP = true;
65 HasCRC = true;
66 HasLSE = true;
67 HasRDM = true;
68 } else if (ArchInfo->Version.getMajor() == 8) {
69 if (ArchInfo->Version.getMinor() >= 7u) {
70 HasWFxT = true;
72 if (ArchInfo->Version.getMinor() >= 6u) {
73 HasBFloat16 = true;
74 HasMatMul = true;
76 if (ArchInfo->Version.getMinor() >= 5u) {
77 HasAlternativeNZCV = true;
78 HasFRInt3264 = true;
79 HasSSBS = true;
80 HasSB = true;
81 HasPredRes = true;
82 HasBTI = true;
84 if (ArchInfo->Version.getMinor() >= 4u) {
85 HasDotProd = true;
86 HasDIT = true;
87 HasFlagM = true;
89 if (ArchInfo->Version.getMinor() >= 3u) {
90 HasRCPC = true;
91 FPU |= NeonMode;
93 if (ArchInfo->Version.getMinor() >= 2u) {
94 HasCCPP = true;
96 if (ArchInfo->Version.getMinor() >= 1u) {
97 HasCRC = true;
98 HasLSE = true;
99 HasRDM = true;
101 } else if (ArchInfo->Version.getMajor() == 9) {
102 if (ArchInfo->Version.getMinor() >= 2u) {
103 HasWFxT = true;
105 if (ArchInfo->Version.getMinor() >= 1u) {
106 HasBFloat16 = true;
107 HasMatMul = true;
109 FPU |= SveMode;
110 HasSVE2 = true;
111 HasFullFP16 = true;
112 HasAlternativeNZCV = true;
113 HasFRInt3264 = true;
114 HasSSBS = true;
115 HasSB = true;
116 HasPredRes = true;
117 HasBTI = true;
118 HasDotProd = true;
119 HasDIT = true;
120 HasFlagM = true;
121 HasRCPC = true;
122 FPU |= NeonMode;
123 HasCCPP = true;
124 HasCRC = true;
125 HasLSE = true;
126 HasRDM = true;
130 AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
131 const TargetOptions &Opts)
132 : TargetInfo(Triple), ABI("aapcs") {
133 if (getTriple().isOSOpenBSD()) {
134 Int64Type = SignedLongLong;
135 IntMaxType = SignedLongLong;
136 } else {
137 if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
138 WCharType = UnsignedInt;
140 Int64Type = SignedLong;
141 IntMaxType = SignedLong;
144 // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
145 HasLegalHalfType = true;
146 HalfArgsAndReturns = true;
147 HasFloat16 = true;
148 HasStrictFP = true;
150 if (Triple.isArch64Bit())
151 LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
152 else
153 LongWidth = LongAlign = PointerWidth = PointerAlign = 32;
155 MaxVectorAlign = 128;
156 MaxAtomicInlineWidth = 128;
157 MaxAtomicPromoteWidth = 128;
159 LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
160 LongDoubleFormat = &llvm::APFloat::IEEEquad();
162 BFloat16Width = BFloat16Align = 16;
163 BFloat16Format = &llvm::APFloat::BFloat();
165 // Make __builtin_ms_va_list available.
166 HasBuiltinMSVaList = true;
168 // Make the SVE types available. Note that this deliberately doesn't
169 // depend on SveMode, since in principle it should be possible to turn
170 // SVE on and off within a translation unit. It should also be possible
171 // to compile the global declaration:
173 // __SVInt8_t *ptr;
175 // even without SVE.
176 HasAArch64SVETypes = true;
178 // {} in inline assembly are neon specifiers, not assembly variant
179 // specifiers.
180 NoAsmVariants = true;
182 // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
183 // contributes to the alignment of the containing aggregate in the same way
184 // a plain (non bit-field) member of that type would, without exception for
185 // zero-sized or anonymous bit-fields."
186 assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
187 UseZeroLengthBitfieldAlignment = true;
189 // AArch64 targets default to using the ARM C++ ABI.
190 TheCXXABI.set(TargetCXXABI::GenericAArch64);
192 if (Triple.getOS() == llvm::Triple::Linux)
193 this->MCountName = "\01_mcount";
194 else if (Triple.getOS() == llvm::Triple::UnknownOS)
195 this->MCountName =
196 Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
199 StringRef AArch64TargetInfo::getABI() const { return ABI; }
201 bool AArch64TargetInfo::setABI(const std::string &Name) {
202 if (Name != "aapcs" && Name != "darwinpcs")
203 return false;
205 ABI = Name;
206 return true;
209 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
210 BranchProtectionInfo &BPI,
211 StringRef &Err) const {
212 llvm::ARM::ParsedBranchProtection PBP;
213 if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
214 return false;
216 BPI.SignReturnAddr =
217 llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
218 .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
219 .Case("all", LangOptions::SignReturnAddressScopeKind::All)
220 .Default(LangOptions::SignReturnAddressScopeKind::None);
222 if (PBP.Key == "a_key")
223 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
224 else
225 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
227 BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
228 return true;
231 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
232 return Name == "generic" || llvm::AArch64::parseCpu(Name);
235 bool AArch64TargetInfo::setCPU(const std::string &Name) {
236 return isValidCPUName(Name);
239 void AArch64TargetInfo::fillValidCPUList(
240 SmallVectorImpl<StringRef> &Values) const {
241 llvm::AArch64::fillValidCPUArchList(Values);
244 void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
245 MacroBuilder &Builder) const {
246 Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
249 void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
250 MacroBuilder &Builder) const {
251 // Also include the ARMv8.1 defines
252 getTargetDefinesARMV81A(Opts, Builder);
255 void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
256 MacroBuilder &Builder) const {
257 Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
258 Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
259 Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
260 // Also include the Armv8.2 defines
261 getTargetDefinesARMV82A(Opts, Builder);
264 void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
265 MacroBuilder &Builder) const {
266 // Also include the Armv8.3 defines
267 getTargetDefinesARMV83A(Opts, Builder);
270 void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
271 MacroBuilder &Builder) const {
272 Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
273 Builder.defineMacro("__ARM_FEATURE_BTI", "1");
274 // Also include the Armv8.4 defines
275 getTargetDefinesARMV84A(Opts, Builder);
278 void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
279 MacroBuilder &Builder) const {
280 // Also include the Armv8.5 defines
281 // FIXME: Armv8.6 makes the following extensions mandatory:
282 // - __ARM_FEATURE_BF16
283 // - __ARM_FEATURE_MATMUL_INT8
284 // Handle them here.
285 getTargetDefinesARMV85A(Opts, Builder);
288 void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
289 MacroBuilder &Builder) const {
290 // Also include the Armv8.6 defines
291 getTargetDefinesARMV86A(Opts, Builder);
294 void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
295 MacroBuilder &Builder) const {
296 // Also include the Armv8.7 defines
297 getTargetDefinesARMV87A(Opts, Builder);
300 void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
301 MacroBuilder &Builder) const {
302 // Also include the Armv8.8 defines
303 getTargetDefinesARMV88A(Opts, Builder);
306 void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
307 MacroBuilder &Builder) const {
308 // Armv9-A maps to Armv8.5-A
309 getTargetDefinesARMV85A(Opts, Builder);
312 void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
313 MacroBuilder &Builder) const {
314 // Armv9.1-A maps to Armv8.6-A
315 getTargetDefinesARMV86A(Opts, Builder);
318 void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
319 MacroBuilder &Builder) const {
320 // Armv9.2-A maps to Armv8.7-A
321 getTargetDefinesARMV87A(Opts, Builder);
324 void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
325 MacroBuilder &Builder) const {
326 // Armv9.3-A maps to Armv8.8-A
327 getTargetDefinesARMV88A(Opts, Builder);
330 void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
331 MacroBuilder &Builder) const {
332 // Armv9.4-A maps to Armv8.9-A
333 getTargetDefinesARMV89A(Opts, Builder);
336 void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
337 MacroBuilder &Builder) const {
338 // Target identification.
339 if (getTriple().isWindowsArm64EC()) {
340 // Define the same set of macros as would be defined on x86_64 to ensure that
341 // ARM64EC datatype layouts match those of x86_64 compiled code
342 Builder.defineMacro("__amd64__");
343 Builder.defineMacro("__amd64");
344 Builder.defineMacro("__x86_64");
345 Builder.defineMacro("__x86_64__");
346 Builder.defineMacro("__arm64ec__");
347 } else {
348 Builder.defineMacro("__aarch64__");
351 // Inline assembly supports AArch64 flag outputs.
352 Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
354 std::string CodeModel = getTargetOpts().CodeModel;
355 if (CodeModel == "default")
356 CodeModel = "small";
357 for (char &c : CodeModel)
358 c = toupper(c);
359 Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
361 // ACLE predefines. Many can only have one possible value on v8 AArch64.
362 Builder.defineMacro("__ARM_ACLE", "200");
363 Builder.defineMacro("__ARM_ARCH",
364 std::to_string(ArchInfo->Version.getMajor()));
365 Builder.defineMacro("__ARM_ARCH_PROFILE",
366 std::string("'") + (char)ArchInfo->Profile + "'");
368 Builder.defineMacro("__ARM_64BIT_STATE", "1");
369 Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
370 Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
372 Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
373 Builder.defineMacro("__ARM_FEATURE_FMA", "1");
374 Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
375 Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
376 Builder.defineMacro("__ARM_FEATURE_DIV"); // For backwards compatibility
377 Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
378 Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
380 Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
382 // 0xe implies support for half, single and double precision operations.
383 if (FPU & FPUMode)
384 Builder.defineMacro("__ARM_FP", "0xE");
386 // PCS specifies this for SysV variants, which is all we support. Other ABIs
387 // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
388 Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
389 Builder.defineMacro("__ARM_FP16_ARGS", "1");
391 if (Opts.UnsafeFPMath)
392 Builder.defineMacro("__ARM_FP_FAST", "1");
394 Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
395 Twine(Opts.WCharSize ? Opts.WCharSize : 4));
397 Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
399 if (FPU & NeonMode) {
400 Builder.defineMacro("__ARM_NEON", "1");
401 // 64-bit NEON supports half, single and double precision operations.
402 Builder.defineMacro("__ARM_NEON_FP", "0xE");
405 if (FPU & SveMode)
406 Builder.defineMacro("__ARM_FEATURE_SVE", "1");
408 if ((FPU & NeonMode) && (FPU & SveMode))
409 Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
411 if (HasSVE2)
412 Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
414 if (HasSVE2 && HasSVE2AES)
415 Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
417 if (HasSVE2 && HasSVE2BitPerm)
418 Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
420 if (HasSVE2 && HasSVE2SHA3)
421 Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
423 if (HasSVE2 && HasSVE2SM4)
424 Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
426 if (HasCRC)
427 Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
429 if (HasRCPC3)
430 Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
431 else if (HasRCPC)
432 Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
434 if (HasFMV)
435 Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
437 // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
438 // macros for AES, SHA2, SHA3 and SM4
439 if (HasAES && HasSHA2)
440 Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
442 if (HasAES)
443 Builder.defineMacro("__ARM_FEATURE_AES", "1");
445 if (HasSHA2)
446 Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
448 if (HasSHA3) {
449 Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
450 Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
453 if (HasSM4) {
454 Builder.defineMacro("__ARM_FEATURE_SM3", "1");
455 Builder.defineMacro("__ARM_FEATURE_SM4", "1");
458 if (HasPAuth)
459 Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
461 if (HasUnaligned)
462 Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
464 if ((FPU & NeonMode) && HasFullFP16)
465 Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
466 if (HasFullFP16)
467 Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
469 if (HasDotProd)
470 Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
472 if (HasMTE)
473 Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
475 if (HasTME)
476 Builder.defineMacro("__ARM_FEATURE_TME", "1");
478 if (HasMatMul)
479 Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
481 if (HasLSE)
482 Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
484 if (HasBFloat16) {
485 Builder.defineMacro("__ARM_FEATURE_BF16", "1");
486 Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
487 Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
488 Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
491 if ((FPU & SveMode) && HasBFloat16) {
492 Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
495 if ((FPU & SveMode) && HasMatmulFP64)
496 Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
498 if ((FPU & SveMode) && HasMatmulFP32)
499 Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
501 if ((FPU & SveMode) && HasMatMul)
502 Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
504 if ((FPU & NeonMode) && HasFP16FML)
505 Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
507 if (Opts.hasSignReturnAddress()) {
508 // Bitmask:
509 // 0: Protection using the A key
510 // 1: Protection using the B key
511 // 2: Protection including leaf functions
512 unsigned Value = 0;
514 if (Opts.isSignReturnAddressWithAKey())
515 Value |= (1 << 0);
516 else
517 Value |= (1 << 1);
519 if (Opts.isSignReturnAddressScopeAll())
520 Value |= (1 << 2);
522 Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
525 if (Opts.BranchTargetEnforcement)
526 Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
528 if (HasLS64)
529 Builder.defineMacro("__ARM_FEATURE_LS64", "1");
531 if (HasRandGen)
532 Builder.defineMacro("__ARM_FEATURE_RNG", "1");
534 if (HasMOPS)
535 Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
537 if (HasD128)
538 Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
540 if (*ArchInfo == llvm::AArch64::ARMV8_1A)
541 getTargetDefinesARMV81A(Opts, Builder);
542 else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
543 getTargetDefinesARMV82A(Opts, Builder);
544 else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
545 getTargetDefinesARMV83A(Opts, Builder);
546 else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
547 getTargetDefinesARMV84A(Opts, Builder);
548 else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
549 getTargetDefinesARMV85A(Opts, Builder);
550 else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
551 getTargetDefinesARMV86A(Opts, Builder);
552 else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
553 getTargetDefinesARMV87A(Opts, Builder);
554 else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
555 getTargetDefinesARMV88A(Opts, Builder);
556 else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
557 getTargetDefinesARMV89A(Opts, Builder);
558 else if (*ArchInfo == llvm::AArch64::ARMV9A)
559 getTargetDefinesARMV9A(Opts, Builder);
560 else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
561 getTargetDefinesARMV91A(Opts, Builder);
562 else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
563 getTargetDefinesARMV92A(Opts, Builder);
564 else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
565 getTargetDefinesARMV93A(Opts, Builder);
566 else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
567 getTargetDefinesARMV94A(Opts, Builder);
569 // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
570 Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
571 Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
572 Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
573 Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
575 // Allow detection of fast FMA support.
576 Builder.defineMacro("__FP_FAST_FMA", "1");
577 Builder.defineMacro("__FP_FAST_FMAF", "1");
579 // C/C++ operators work on both VLS and VLA SVE types
580 if (FPU & SveMode)
581 Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
583 if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
584 Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
588 ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
589 return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
590 Builtin::FirstTSBuiltin);
593 std::optional<std::pair<unsigned, unsigned>>
594 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
595 if (LangOpts.VScaleMin || LangOpts.VScaleMax)
596 return std::pair<unsigned, unsigned>(
597 LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
599 if (hasFeature("sve"))
600 return std::pair<unsigned, unsigned>(1, 16);
602 return std::nullopt;
605 unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
606 if (Name == "default")
607 return 0;
608 for (const auto &E : llvm::AArch64::Extensions)
609 if (Name == E.Name)
610 return E.FmvPriority;
611 return 0;
614 unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
615 // Take the maximum priority as per feature cost, so more features win.
616 return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
619 bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
620 auto F = llvm::find_if(llvm::AArch64::Extensions, [&](const auto &E) {
621 return Name == E.Name && !E.DependentFeatures.empty();
623 return F != std::end(llvm::AArch64::Extensions);
626 StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
627 auto F = llvm::find_if(llvm::AArch64::Extensions,
628 [&](const auto &E) { return Name == E.Name; });
629 return F != std::end(llvm::AArch64::Extensions) ? F->DependentFeatures
630 : StringRef();
633 bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
634 for (const auto &E : llvm::AArch64::Extensions)
635 if (FeatureStr == E.Name)
636 return true;
637 return false;
640 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
641 return llvm::StringSwitch<bool>(Feature)
642 .Cases("aarch64", "arm64", "arm", true)
643 .Case("fmv", HasFMV)
644 .Cases("neon", "fp", "simd", FPU & NeonMode)
645 .Case("jscvt", HasJSCVT)
646 .Case("fcma", HasFCMA)
647 .Case("rng", HasRandGen)
648 .Case("flagm", HasFlagM)
649 .Case("flagm2", HasAlternativeNZCV)
650 .Case("fp16fml", HasFP16FML)
651 .Case("dotprod", HasDotProd)
652 .Case("sm4", HasSM4)
653 .Case("rdm", HasRDM)
654 .Case("lse", HasLSE)
655 .Case("crc", HasCRC)
656 .Case("sha2", HasSHA2)
657 .Case("sha3", HasSHA3)
658 .Cases("aes", "pmull", HasAES)
659 .Cases("fp16", "fullfp16", HasFullFP16)
660 .Case("dit", HasDIT)
661 .Case("dpb", HasCCPP)
662 .Case("dpb2", HasCCDP)
663 .Case("rcpc", HasRCPC)
664 .Case("frintts", HasFRInt3264)
665 .Case("i8mm", HasMatMul)
666 .Case("bf16", HasBFloat16)
667 .Case("sve", FPU & SveMode)
668 .Case("sve-bf16", FPU & SveMode && HasBFloat16)
669 .Case("sve-i8mm", FPU & SveMode && HasMatMul)
670 .Case("f32mm", FPU & SveMode && HasMatmulFP32)
671 .Case("f64mm", FPU & SveMode && HasMatmulFP64)
672 .Case("sve2", FPU & SveMode && HasSVE2)
673 .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
674 .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
675 .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
676 .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
677 .Case("sme", HasSME)
678 .Case("sme-f64f64", HasSMEF64F64)
679 .Case("sme-i16i64", HasSMEI16I64)
680 .Cases("memtag", "memtag2", HasMTE)
681 .Case("sb", HasSB)
682 .Case("predres", HasPredRes)
683 .Cases("ssbs", "ssbs2", HasSSBS)
684 .Case("bti", HasBTI)
685 .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
686 .Case("wfxt", HasWFxT)
687 .Case("rcpc3", HasRCPC3)
688 .Default(false);
691 void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
692 StringRef Name, bool Enabled) const {
693 Features[Name] = Enabled;
694 // If the feature is an architecture feature (like v8.2a), add all previous
695 // architecture versions and any dependant target features.
696 const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
697 llvm::AArch64::ArchInfo::findBySubArch(Name);
699 if (!ArchInfo)
700 return; // Not an architecture, nothing more to do.
702 // Disabling an architecture feature does not affect dependent features
703 if (!Enabled)
704 return;
706 for (const auto *OtherArch : llvm::AArch64::ArchInfos)
707 if (ArchInfo->implies(*OtherArch))
708 Features[OtherArch->getSubArch()] = true;
710 // Set any features implied by the architecture
711 std::vector<StringRef> CPUFeats;
712 if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
713 for (auto F : CPUFeats) {
714 assert(F[0] == '+' && "Expected + in target feature!");
715 Features[F.drop_front(1)] = true;
720 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
721 DiagnosticsEngine &Diags) {
722 for (const auto &Feature : Features) {
723 if (Feature == "-fp-armv8")
724 HasNoFP = true;
725 if (Feature == "-neon")
726 HasNoNeon = true;
727 if (Feature == "-sve")
728 HasNoSVE = true;
730 if (Feature == "+neon" || Feature == "+fp-armv8")
731 FPU |= NeonMode;
732 if (Feature == "+jscvt") {
733 HasJSCVT = true;
734 FPU |= NeonMode;
736 if (Feature == "+fcma") {
737 HasFCMA = true;
738 FPU |= NeonMode;
741 if (Feature == "+sve") {
742 FPU |= NeonMode;
743 FPU |= SveMode;
744 HasFullFP16 = true;
746 if (Feature == "+sve2") {
747 FPU |= NeonMode;
748 FPU |= SveMode;
749 HasFullFP16 = true;
750 HasSVE2 = true;
752 if (Feature == "+sve2-aes") {
753 FPU |= NeonMode;
754 FPU |= SveMode;
755 HasFullFP16 = true;
756 HasSVE2 = true;
757 HasSVE2AES = true;
759 if (Feature == "+sve2-sha3") {
760 FPU |= NeonMode;
761 FPU |= SveMode;
762 HasFullFP16 = true;
763 HasSVE2 = true;
764 HasSVE2SHA3 = true;
766 if (Feature == "+sve2-sm4") {
767 FPU |= NeonMode;
768 FPU |= SveMode;
769 HasFullFP16 = true;
770 HasSVE2 = true;
771 HasSVE2SM4 = true;
773 if (Feature == "+sve2-bitperm") {
774 FPU |= NeonMode;
775 FPU |= SveMode;
776 HasFullFP16 = true;
777 HasSVE2 = true;
778 HasSVE2BitPerm = true;
780 if (Feature == "+f32mm") {
781 FPU |= NeonMode;
782 FPU |= SveMode;
783 HasFullFP16 = true;
784 HasMatmulFP32 = true;
786 if (Feature == "+f64mm") {
787 FPU |= NeonMode;
788 FPU |= SveMode;
789 HasFullFP16 = true;
790 HasMatmulFP64 = true;
792 if (Feature == "+sme") {
793 HasSME = true;
794 HasBFloat16 = true;
795 HasFullFP16 = true;
797 if (Feature == "+sme-f64f64") {
798 HasSME = true;
799 HasSMEF64F64 = true;
800 HasBFloat16 = true;
801 HasFullFP16 = true;
803 if (Feature == "+sme-i16i64") {
804 HasSME = true;
805 HasSMEI16I64 = true;
806 HasBFloat16 = true;
807 HasFullFP16 = true;
809 if (Feature == "+sb")
810 HasSB = true;
811 if (Feature == "+predres")
812 HasPredRes = true;
813 if (Feature == "+ssbs")
814 HasSSBS = true;
815 if (Feature == "+bti")
816 HasBTI = true;
817 if (Feature == "+wfxt")
818 HasWFxT = true;
819 if (Feature == "-fmv")
820 HasFMV = false;
821 if (Feature == "+crc")
822 HasCRC = true;
823 if (Feature == "+rcpc")
824 HasRCPC = true;
825 if (Feature == "+aes") {
826 FPU |= NeonMode;
827 HasAES = true;
829 if (Feature == "+sha2") {
830 FPU |= NeonMode;
831 HasSHA2 = true;
833 if (Feature == "+sha3") {
834 FPU |= NeonMode;
835 HasSHA2 = true;
836 HasSHA3 = true;
838 if (Feature == "+rdm") {
839 FPU |= NeonMode;
840 HasRDM = true;
842 if (Feature == "+dit")
843 HasDIT = true;
844 if (Feature == "+cccp")
845 HasCCPP = true;
846 if (Feature == "+ccdp") {
847 HasCCPP = true;
848 HasCCDP = true;
850 if (Feature == "+fptoint")
851 HasFRInt3264 = true;
852 if (Feature == "+sm4") {
853 FPU |= NeonMode;
854 HasSM4 = true;
856 if (Feature == "+strict-align")
857 HasUnaligned = false;
858 // All predecessor archs are added but select the latest one for ArchKind.
859 if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
860 ArchInfo = &llvm::AArch64::ARMV8A;
861 if (Feature == "+v8.1a" &&
862 ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
863 ArchInfo = &llvm::AArch64::ARMV8_1A;
864 if (Feature == "+v8.2a" &&
865 ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
866 ArchInfo = &llvm::AArch64::ARMV8_2A;
867 if (Feature == "+v8.3a" &&
868 ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
869 ArchInfo = &llvm::AArch64::ARMV8_3A;
870 if (Feature == "+v8.4a" &&
871 ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
872 ArchInfo = &llvm::AArch64::ARMV8_4A;
873 if (Feature == "+v8.5a" &&
874 ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
875 ArchInfo = &llvm::AArch64::ARMV8_5A;
876 if (Feature == "+v8.6a" &&
877 ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
878 ArchInfo = &llvm::AArch64::ARMV8_6A;
879 if (Feature == "+v8.7a" &&
880 ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
881 ArchInfo = &llvm::AArch64::ARMV8_7A;
882 if (Feature == "+v8.8a" &&
883 ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
884 ArchInfo = &llvm::AArch64::ARMV8_8A;
885 if (Feature == "+v8.9a" &&
886 ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
887 ArchInfo = &llvm::AArch64::ARMV8_9A;
888 if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
889 ArchInfo = &llvm::AArch64::ARMV9A;
890 if (Feature == "+v9.1a" &&
891 ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
892 ArchInfo = &llvm::AArch64::ARMV9_1A;
893 if (Feature == "+v9.2a" &&
894 ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
895 ArchInfo = &llvm::AArch64::ARMV9_2A;
896 if (Feature == "+v9.3a" &&
897 ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
898 ArchInfo = &llvm::AArch64::ARMV9_3A;
899 if (Feature == "+v9.4a" &&
900 ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
901 ArchInfo = &llvm::AArch64::ARMV9_4A;
902 if (Feature == "+v8r")
903 ArchInfo = &llvm::AArch64::ARMV8R;
904 if (Feature == "+fullfp16") {
905 FPU |= NeonMode;
906 HasFullFP16 = true;
908 if (Feature == "+dotprod") {
909 FPU |= NeonMode;
910 HasDotProd = true;
912 if (Feature == "+fp16fml") {
913 FPU |= NeonMode;
914 HasFullFP16 = true;
915 HasFP16FML = true;
917 if (Feature == "+mte")
918 HasMTE = true;
919 if (Feature == "+tme")
920 HasTME = true;
921 if (Feature == "+pauth")
922 HasPAuth = true;
923 if (Feature == "+i8mm")
924 HasMatMul = true;
925 if (Feature == "+bf16")
926 HasBFloat16 = true;
927 if (Feature == "+lse")
928 HasLSE = true;
929 if (Feature == "+ls64")
930 HasLS64 = true;
931 if (Feature == "+rand")
932 HasRandGen = true;
933 if (Feature == "+flagm")
934 HasFlagM = true;
935 if (Feature == "+altnzcv") {
936 HasFlagM = true;
937 HasAlternativeNZCV = true;
939 if (Feature == "+mops")
940 HasMOPS = true;
941 if (Feature == "+d128")
942 HasD128 = true;
943 if (Feature == "+gcs")
944 HasGCS = true;
945 if (Feature == "+rcpc3")
946 HasRCPC3 = true;
949 // Check features that are manually disabled by command line options.
950 // This needs to be checked after architecture-related features are handled,
951 // making sure they are properly disabled when required.
952 for (const auto &Feature : Features) {
953 if (Feature == "-d128")
954 HasD128 = false;
957 setDataLayout();
958 setArchFeatures();
960 if (HasNoFP) {
961 FPU &= ~FPUMode;
962 FPU &= ~NeonMode;
963 FPU &= ~SveMode;
965 if (HasNoNeon) {
966 FPU &= ~NeonMode;
967 FPU &= ~SveMode;
969 if (HasNoSVE)
970 FPU &= ~SveMode;
972 return true;
// Build the target feature map for \p CPU: first add the features implied by
// the CPU itself, then layer the explicit \p FeaturesVec on top, and finally
// delegate to the generic TargetInfo implementation.
bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
  if (CpuInfo) {
    auto Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
  // add target '+/-'features that can later disable some of features added on
  // the first loop. Function Multi Versioning features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if (((Feature[0] == '?' || Feature[0] == '+')) &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
      // Pull in every feature this one depends on (comma-separated list).
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(AttrFeatures, ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      // Normalize "+ext" spellings to the canonical backend feature name
      // when the extension is recognised; otherwise pass through unchanged.
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
            llvm::AArch64::parseArchExtension(Feature.substr(1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  // Split a "+feat1+feat2" string and append the corresponding backend
  // feature names (or a best-effort "+/-" spelling for unknown names, which
  // Sema will reject later) to the given feature list.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Features.push_back(FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.startswith("no"))
          Features.push_back("-" + Feature.drop_front(2).str());
        else
          Features.push_back("+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but has no effect on AArch64.
    if (Feature.startswith("fpmath="))
      continue;

    if (Feature.startswith("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.startswith("arch=")) {
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const std::optional<llvm::AArch64::ArchInfo> AI =
          llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.startswith("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.startswith("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.startswith("+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.startswith("no-")) {
      // "no-feat": translate to the backend name if known, negated.
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(Feature.split("-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
      else
        Ret.Features.push_back("-" + Feature.split("-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(FeatureName.str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  return Ret;
}
1117 bool AArch64TargetInfo::hasBFloat16Type() const {
1118 return true;
1121 TargetInfo::CallingConvCheckResult
1122 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1123 switch (CC) {
1124 case CC_C:
1125 case CC_Swift:
1126 case CC_SwiftAsync:
1127 case CC_PreserveMost:
1128 case CC_PreserveAll:
1129 case CC_OpenCLKernel:
1130 case CC_AArch64VectorCall:
1131 case CC_AArch64SVEPCS:
1132 case CC_Win64:
1133 return CCCR_OK;
1134 default:
1135 return CCCR_Warning;
1139 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1141 TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
1142 return TargetInfo::AArch64ABIBuiltinVaList;
// Register names accepted in GCC-style inline asm and register variables,
// grouped by register bank.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
    "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15"
};
// Expose the static register-name table to the generic TargetInfo machinery.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}
// Alternate spellings accepted for the registers above.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
// Expose the static register-alias table to the generic TargetInfo machinery.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}
// Returns the length of an "@cc<cond>" asm flag-output constraint (always 5
// characters: '@', 'c', 'c' and a two-letter condition code), or 0 when
// \p Name is not exactly such a constraint.
static unsigned matchAsmCCConstraint(const char *Name) {
  static const char CondCodes[][3] = {"eq", "ne", "hs", "cs", "cc", "lo",
                                      "mi", "pl", "vs", "vc", "hi", "ls",
                                      "ge", "lt", "gt", "le"};
  // Require the "@cc" prefix, exactly two more characters, and nothing after.
  if (Name[0] != '@' || Name[1] != 'c' || Name[2] != 'c' || Name[3] == '\0' ||
      Name[4] == '\0' || Name[5] != '\0')
    return 0;
  for (const auto &Cond : CondCodes)
    if (Name[3] == Cond[0] && Name[4] == Cond[1])
      return 5;
  return 0;
}
// Canonicalize a single inline-asm constraint for the backend.  Note the
// caller advances \p Constraint by one character after this returns, so
// multi-character constraints only advance it by length-1 here.
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
      // Brace the matched "@cc<cond>" token, e.g. "@cceq" -> "{@cceq}".
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    // Unrecognised '@' constraint: pass the single character through.
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
// Validate a GCC-style inline-asm constraint letter for AArch64 and record
// whether it permits a register and/or memory operand.  Multi-character
// constraints advance \p Name past their extra characters; the caller
// consumes the first one.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2; // consume the two extra characters of "Upl"/"Upa"/"Uph"
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition ("@cc<cond>" flag-output constraints).
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
  }
  return false;
}
// Check whether an operand modifier is sensible for the given constraint and
// operand size; on failure, suggest a better modifier for the diagnostic.
bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      // 512-bit operands are only valid with the LS64 extension.
      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
// Inline asm on AArch64 has no implicitly clobbered registers.
std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1373 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1374 if (RegNo == 0)
1375 return 0;
1376 if (RegNo == 1)
1377 return 1;
1378 return -1;
1381 bool AArch64TargetInfo::hasInt128Type() const { return true; }
// Little-endian AArch64: adds nothing beyond the common AArch64 target info.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1387 void AArch64leTargetInfo::setDataLayout() {
1388 if (getTriple().isOSBinFormatMachO()) {
1389 if(getTriple().isArch32Bit())
1390 resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
1391 else
1392 resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
1393 } else
1394 resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
// Define the little-endian marker macro, then the common AArch64 macros.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
// Big-endian AArch64: adds nothing beyond the common AArch64 target info.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
// Define the big-endian marker macros, then the common AArch64 macros.
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
void AArch64beTargetInfo::setDataLayout() {
  // Big-endian AArch64 is never Mach-O, so only the ELF-style layout exists.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // long double is plain IEEE double on Windows.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // All the pointer-sized integer typedefs are 64-bit (long long based,
  // since long is only 32 bits under LLP64).
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1438 void WindowsARM64TargetInfo::setDataLayout() {
1439 resetDataLayout(Triple.isOSBinFormatMachO()
1440 ? "e-m:o-i64:64-i128:128-n32:64-S128"
1441 : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1442 Triple.isOSBinFormatMachO() ? "_" : "");
// Windows on ARM64 uses a plain char* va_list rather than the AAPCS64 record.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  // x86-specific conventions are silently ignored (not warned about) so that
  // headers written for x86 Windows still compile cleanly on ARM64.
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  // MSVC environment: use the Microsoft C++ ABI.
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    // Arm64EC presents itself as x64 to existing code, so the x64 MSVC
    // architecture macros are defined alongside _M_ARM64EC.
    Builder.defineMacro("_M_X64", "100");
    Builder.defineMacro("_M_AMD64", "100");
    Builder.defineMacro("_M_ARM64EC", "1");
  } else {
    Builder.defineMacro("_M_ARM64", "1");
  }
}
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  // Always the MSVC Win64 convention kind; the ABI-compat setting is ignored.
  return CCK_MicrosoftWin64;
}
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {             // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);   // align type at least 16 bytes
  } else if (TypeSize >= 64) {       // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);    // align type at least 8 bytes
  } else if (TypeSize >= 16) {       // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);    // align type at least 4 bytes
  }
  return Align;
}
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  // MinGW uses the generic (Itanium-family) AArch64 C++ ABI, not MSVC's.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // 32-bit Darwin (arm64_32) still keeps a 64-bit intmax_t.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // Darwin's long double is plain IEEE double: 64 bits wide and aligned.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // 32-bit arm64_32 keeps 32-bit-ARM-style bitfield layout and uses the
    // WatchOS C++ ABI; 64-bit Darwin uses the AppleARM64 ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
// Define the Darwin-specific arm64 macros, then the shared Darwin set
// (availability, platform version, etc.) via getDarwinDefines.
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  // Pointer-authentication-capable variant of the Apple ABI.
  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
// Darwin deviates from AAPCS64 and uses a plain char* va_list.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    // Force the architecture to aarch64 while preserving the incoming
    // triple's vendor, OS and environment components.
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
// RenderScript sources see __RENDERSCRIPT__ in addition to the usual
// little-endian AArch64 macro set.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}