//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements AArch64 TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/ARMTargetParserCommon.h"
#include <optional>

using namespace clang;
using namespace clang::targets;

static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSME.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};

void AArch64TargetInfo::setArchFeatures() {
  if (*ArchInfo == llvm::AArch64::ARMV8R) {
    HasDotProd = true;
    HasDIT = true;
    HasFlagM = true;
    HasRCPC = true;
    FPU |= NeonMode;
    HasCCPP = true;
    HasCRC = true;
    HasLSE = true;
    HasRDM = true;
  } else if (ArchInfo->Version.getMajor() == 8) {
    if (ArchInfo->Version.getMinor() >= 7u) {
      HasWFxT = true;
    }
    if (ArchInfo->Version.getMinor() >= 6u) {
      HasBFloat16 = true;
      HasMatMul = true;
    }
    if (ArchInfo->Version.getMinor() >= 5u) {
      HasAlternativeNZCV = true;
      HasFRInt3264 = true;
      HasSSBS = true;
      HasSB = true;
      HasPredRes = true;
      HasBTI = true;
    }
    if (ArchInfo->Version.getMinor() >= 4u) {
      HasDotProd = true;
      HasDIT = true;
      HasFlagM = true;
    }
    if (ArchInfo->Version.getMinor() >= 3u) {
      HasRCPC = true;
      FPU |= NeonMode;
    }
    if (ArchInfo->Version.getMinor() >= 2u) {
      HasCCPP = true;
    }
    if (ArchInfo->Version.getMinor() >= 1u) {
      HasCRC = true;
      HasLSE = true;
      HasRDM = true;
    }
  } else if (ArchInfo->Version.getMajor() == 9) {
    if (ArchInfo->Version.getMinor() >= 2u) {
      HasWFxT = true;
    }
    if (ArchInfo->Version.getMinor() >= 1u) {
      HasBFloat16 = true;
      HasMatMul = true;
    }
    FPU |= SveMode;
    HasSVE2 = true;
    HasFullFP16 = true;
    HasAlternativeNZCV = true;
    HasFRInt3264 = true;
    HasSSBS = true;
    HasSB = true;
    HasPredRes = true;
    HasBTI = true;
    HasDotProd = true;
    HasDIT = true;
    HasFlagM = true;
    HasRCPC = true;
    FPU |= NeonMode;
    HasCCPP = true;
    HasCRC = true;
    HasLSE = true;
    HasRDM = true;
  }
}

AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HalfArgsAndReturns = true;
  HasFloat16 = true;
  HasStrictFP = true;

  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available. Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit. It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;
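
  // For illustration of the rule above (a sketch, not from the original
  // file): in
  //   struct S { char a : 4; int : 0; char b : 4; };
  // the zero-width 'int' bit-field forces 'b' to the next int-aligned
  // boundary and the 'int' container contributes to S's alignment, so
  // sizeof(S) is 8 under AAPCS.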

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}

StringRef AArch64TargetInfo::getABI() const { return ABI; }

bool AArch64TargetInfo::setABI(const std::string &Name) {
  if (Name != "aapcs" && Name != "darwinpcs")
    return false;

  ABI = Name;
  return true;
}
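
// For illustration (examples of accepted spec strings, mirroring
// -mbranch-protection=...): "standard" enables pac-ret plus BTI,
// "pac-ret+leaf+b-key" selects SignReturnAddressScopeKind::All with the
// B key, and "bti" enables branch target enforcement only.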
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
                                                 BranchProtectionInfo &BPI,
                                                 StringRef &Err) const {
  llvm::ARM::ParsedBranchProtection PBP;
  if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
    return false;

  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  return true;
}

bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
  return Name == "generic" || llvm::AArch64::parseCpu(Name);
}

bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}

void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}

void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}

void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  Builder.defineMacro("__ARM_FEATURE_BTI", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // Inline assembly supports AArch64 flag outputs.
  Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");

  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH",
                      std::to_string(ArchInfo->Version.getMajor()));
  Builder.defineMacro("__ARM_ARCH_PROFILE",
                      std::string("'") + (char)ArchInfo->Profile + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  if (FPU & FPUMode)
    Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  if (HasRCPC)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "1");

  if (HasFMV)
    Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");

  // The __ARM_FEATURE_CRYPTO macro is deprecated in favor of the finer-grained
  // feature macros for AES, SHA2, SHA3 and SM4.
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasPAuth)
    Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  if (HasD128)
    Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");

  if (*ArchInfo == llvm::AArch64::ARMV8_1A)
    getTargetDefinesARMV81A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
    getTargetDefinesARMV82A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
    getTargetDefinesARMV83A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
    getTargetDefinesARMV84A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
    getTargetDefinesARMV85A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
    getTargetDefinesARMV86A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
    getTargetDefinesARMV87A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
    getTargetDefinesARMV88A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
    getTargetDefinesARMV89A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9A)
    getTargetDefinesARMV9A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
    getTargetDefinesARMV91A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
    getTargetDefinesARMV92A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
    getTargetDefinesARMV93A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
    getTargetDefinesARMV94A(Opts, Builder);

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // Allow detection of fast FMA support.
  Builder.defineMacro("__FP_FAST_FMA", "1");
  Builder.defineMacro("__FP_FAST_FMAF", "1");

  // C/C++ operators work on both VLS and VLA SVE types
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");

  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
  }
}

ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                         Builtin::FirstTSBuiltin);
}

std::optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
  if (LangOpts.VScaleMin || LangOpts.VScaleMax)
    return std::pair<unsigned, unsigned>(
        LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);

  if (hasFeature("sve"))
    return std::pair<unsigned, unsigned>(1, 16);

  return std::nullopt;
}

unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
  if (Name == "default")
    return 0;
  for (const auto &E : llvm::AArch64::Extensions)
    if (Name == E.Name)
      return E.FmvPriority;
  return 0;
}

unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
  // Take the maximum priority as per feature cost, so more features win.
  return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
}

bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
  auto F = llvm::find_if(llvm::AArch64::Extensions, [&](const auto &E) {
    return Name == E.Name && !E.DependentFeatures.empty();
  });
  return F != std::end(llvm::AArch64::Extensions);
}

StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
  auto F = llvm::find_if(llvm::AArch64::Extensions,
                         [&](const auto &E) { return Name == E.Name; });
  return F != std::end(llvm::AArch64::Extensions) ? F->DependentFeatures
                                                  : StringRef();
}
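
// For illustration (a sketch of the intended behaviour; the authoritative
// dependency strings live in llvm::AArch64::Extensions):
// getFeatureDependencies("sve2") returns a comma-separated list of the
// features SVE2 depends on, such as "+sve" and "+fullfp16".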

bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
  for (const auto &E : llvm::AArch64::Extensions)
    if (FeatureStr == E.Name)
      return true;
  return false;
}

bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("fmv", HasFMV)
      .Cases("neon", "fp", "simd", FPU & NeonMode)
      .Case("jscvt", HasJSCVT)
      .Case("fcma", HasFCMA)
      .Case("rng", HasRandGen)
      .Case("flagm", HasFlagM)
      .Case("flagm2", HasAlternativeNZCV)
      .Case("fp16fml", HasFP16FML)
      .Case("dotprod", HasDotProd)
      .Case("sm4", HasSM4)
      .Case("rdm", HasRDM)
      .Case("lse", HasLSE)
      .Case("crc", HasCRC)
      .Case("sha2", HasSHA2)
      .Case("sha3", HasSHA3)
      .Cases("aes", "pmull", HasAES)
      .Cases("fp16", "fullfp16", HasFullFP16)
      .Case("dit", HasDIT)
      .Case("dpb", HasCCPP)
      .Case("dpb2", HasCCDP)
      .Case("rcpc", HasRCPC)
      .Case("frintts", HasFRInt3264)
      .Case("i8mm", HasMatMul)
      .Case("bf16", HasBFloat16)
      .Case("sve", FPU & SveMode)
      .Case("sve-bf16", FPU & SveMode && HasBFloat16)
      .Case("sve-i8mm", FPU & SveMode && HasMatMul)
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
      .Case("sve2", FPU & SveMode && HasSVE2)
      .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
      .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
      .Case("sme", HasSME)
      .Case("sme-f64f64", HasSMEF64F64)
      .Case("sme-i16i64", HasSMEI16I64)
      .Cases("memtag", "memtag2", HasMTE)
      .Case("sb", HasSB)
      .Case("predres", HasPredRes)
      .Cases("ssbs", "ssbs2", HasSSBS)
      .Case("bti", HasBTI)
      .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
      .Case("wfxt", HasWFxT)
      .Default(false);
}
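
// For illustration: the composite cases above mean hasFeature("sve2-bitperm")
// is only true when SVE mode is active *and* the SVE2 bit-permute extension
// is enabled, not merely when the backend feature bit exists.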

void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
                                          StringRef Name, bool Enabled) const {
  Features[Name] = Enabled;
  // If the feature is an architecture feature (like v8.2a), add all previous
  // architecture versions and any dependent target features.
  const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
      llvm::AArch64::ArchInfo::findBySubArch(Name);

  if (!ArchInfo)
    return; // Not an architecture, nothing more to do.

  // Disabling an architecture feature does not affect dependent features.
  if (!Enabled)
    return;

  for (const auto *OtherArch : llvm::AArch64::ArchInfos)
    if (ArchInfo->implies(*OtherArch))
      Features[OtherArch->getSubArch()] = true;

  // Set any features implied by the architecture.
  std::vector<StringRef> CPUFeats;
  if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
    for (auto F : CPUFeats) {
      assert(F[0] == '+' && "Expected + in target feature!");
      Features[F.drop_front(1)] = true;
    }
  }
}
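
// For illustration: setFeatureEnabled(Features, "v8.3a", true) also marks
// "v8.2a", "v8.1a" and "v8a" (via ArchInfo::implies) and turns on that
// architecture's default extensions, e.g. "fp-armv8" and "neon".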

bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  for (const auto &Feature : Features) {
    if (Feature == "-fp-armv8")
      HasNoFP = true;
    if (Feature == "-neon")
      HasNoNeon = true;
    if (Feature == "-sve")
      HasNoSVE = true;

    if (Feature == "+neon" || Feature == "+fp-armv8")
      FPU |= NeonMode;
    if (Feature == "+jscvt") {
      HasJSCVT = true;
      FPU |= NeonMode;
    }
    if (Feature == "+fcma") {
      HasFCMA = true;
      FPU |= NeonMode;
    }

    if (Feature == "+sve") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2-aes") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2AES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2BitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP64 = true;
    }
    if (Feature == "+sme") {
      HasSME = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-f64f64") {
      HasSME = true;
      HasSMEF64F64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-i16i64") {
      HasSME = true;
      HasSMEI16I64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sb")
      HasSB = true;
    if (Feature == "+predres")
      HasPredRes = true;
    if (Feature == "+ssbs")
      HasSSBS = true;
    if (Feature == "+bti")
      HasBTI = true;
    if (Feature == "+wfxt")
      HasWFxT = true;
    if (Feature == "-fmv")
      HasFMV = false;
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+rcpc")
      HasRCPC = true;
    if (Feature == "+aes") {
      FPU |= NeonMode;
      HasAES = true;
    }
    if (Feature == "+sha2") {
      FPU |= NeonMode;
      HasSHA2 = true;
    }
    if (Feature == "+sha3") {
      FPU |= NeonMode;
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+rdm") {
      FPU |= NeonMode;
      HasRDM = true;
    }
    if (Feature == "+dit")
      HasDIT = true;
    if (Feature == "+ccpp")
      HasCCPP = true;
    if (Feature == "+ccdp") {
      HasCCPP = true;
      HasCCDP = true;
    }
    if (Feature == "+fptoint")
      HasFRInt3264 = true;
    if (Feature == "+sm4") {
      FPU |= NeonMode;
      HasSM4 = true;
    }
    if (Feature == "+strict-align")
      HasUnaligned = false;
    // All predecessor archs are added but select the latest one for ArchKind.
    if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8A;
    if (Feature == "+v8.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_1A;
    if (Feature == "+v8.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_2A;
    if (Feature == "+v8.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_3A;
    if (Feature == "+v8.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_4A;
    if (Feature == "+v8.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_5A;
    if (Feature == "+v8.6a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_6A;
    if (Feature == "+v8.7a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_7A;
    if (Feature == "+v8.8a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_8A;
    if (Feature == "+v8.9a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_9A;
    if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
      ArchInfo = &llvm::AArch64::ARMV9A;
    if (Feature == "+v9.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_1A;
    if (Feature == "+v9.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_2A;
    if (Feature == "+v9.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_3A;
    if (Feature == "+v9.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_4A;
    if (Feature == "+v8r")
      ArchInfo = &llvm::AArch64::ARMV8R;
    if (Feature == "+fullfp16") {
      FPU |= NeonMode;
      HasFullFP16 = true;
    }
    if (Feature == "+dotprod") {
      FPU |= NeonMode;
      HasDotProd = true;
    }
    if (Feature == "+fp16fml") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasFP16FML = true;
    }
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+altnzcv") {
      HasFlagM = true;
      HasAlternativeNZCV = true;
    }
    if (Feature == "+mops")
      HasMOPS = true;
    if (Feature == "+d128")
      HasD128 = true;
    if (Feature == "+gcs")
      HasGCS = true;
  }

  // Check features that are manually disabled by command line options.
  // This needs to be checked after architecture-related features are handled,
  // making sure they are properly disabled when required.
  for (const auto &Feature : Features) {
    if (Feature == "-d128")
      HasD128 = false;
  }

  setDataLayout();
  setArchFeatures();

  if (HasNoFP) {
    FPU &= ~FPUMode;
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoNeon) {
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoSVE)
    FPU &= ~SveMode;

  return true;
}
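
// For illustration: given Features = {"+sve", "-neon"}, the main loop sets
// both NeonMode and SveMode for "+sve", and the HasNoNeon fixup at the end
// then clears both modes again, since SVE is modelled here as requiring NEON.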

bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
  if (CpuInfo) {
    uint64_t Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(F.str());
    }
  }

  // Process target and dependent features. This is done in two loops
  // collecting them into UpdatedFeaturesVec: first to add dependent
  // '+'features, second to add target '+/-'features that can later disable
  // some of the features added in the first loop. Function Multi Versioning
  // features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if ((Feature[0] == '?' || Feature[0] == '+') &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(AttrFeatures, ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
            llvm::AArch64::parseArchExtension(Feature.substr(1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
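
// For illustration: a Function Multi Versioning entry such as "?sve2"
// contributes its dependent '+' features in the first loop but is dropped by
// the second loop (which skips '?' entries), while a plain "+sve2" is
// canonicalised through llvm::AArch64::parseArchExtension before being
// forwarded.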

// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Features.push_back(FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.startswith("no"))
          Features.push_back("-" + Feature.drop_front(2).str());
        else
          Features.push_back("+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    if (Feature.startswith("fpmath="))
      continue;

    if (Feature.startswith("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.startswith("arch=")) {
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const std::optional<llvm::AArch64::ArchInfo> AI =
          llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.startswith("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.startswith("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.startswith("+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.startswith("no-")) {
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(Feature.split("-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
      else
        Ret.Features.push_back("-" + Feature.split("-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(FeatureName.str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  return Ret;
}
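
// For illustration:
//   __attribute__((target("arch=armv8.2-a+dotprod,tune=cortex-a55")))
// parses to Ret.Features = {"+v8.2a", "+dotprod"} and Ret.Tune = "cortex-a55".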

bool AArch64TargetInfo::hasBFloat16Type() const {
  return true;
}

TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }

TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}

const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15"
};

ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}

const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};

ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}

// Returns the length of a matched "@cc<cond>" constraint, or 0 if there is
// no match.
static unsigned matchAsmCCConstraint(const char *Name) {
  constexpr unsigned len = 5;
  auto RV = llvm::StringSwitch<unsigned>(Name)
                .Case("@cceq", len)
                .Case("@ccne", len)
                .Case("@cchs", len)
                .Case("@cccs", len)
                .Case("@cccc", len)
                .Case("@cclo", len)
                .Case("@ccmi", len)
                .Case("@ccpl", len)
                .Case("@ccvs", len)
                .Case("@ccvc", len)
                .Case("@cchi", len)
                .Case("@ccls", len)
                .Case("@ccge", len)
                .Case("@cclt", len)
                .Case("@ccgt", len)
                .Case("@ccle", len)
                .Default(0);
  return RV;
}
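
// For illustration (user-level flag-output asm, advertised by the
// __GCC_ASM_FLAG_OUTPUTS__ macro defined earlier in this file):
//   int LessThan;
//   asm("cmp %w1, %w2" : "=@cclt"(LessThan) : "r"(A), "r"(B));
// sets LessThan to 1 exactly when the signed comparison A < B holds.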

std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}

bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-P15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
  }
  return false;
}
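
// For illustration (a sketch assuming the SVE ACLE types are available): the
// SVE predicate constraints accepted above allow asm such as
//   __SVBool_t Pg;
//   asm("ptrue %0.b" : "=Upa"(Pg));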

bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
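
// For illustration: for "=r"(Result) where Result is a 32-bit int, the asm
// body should reference the operand as %w0; without the 'w' modifier this
// validation fails and suggests "w" via SuggestedModifier.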

std::string_view AArch64TargetInfo::getClobbers() const { return ""; }

int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
  if (RegNo == 0)
    return 0;
  if (RegNo == 1)
    return 1;
  return -1;
}

bool AArch64TargetInfo::hasInt128Type() const { return true; }

AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64leTargetInfo::setDataLayout() {
  if (getTriple().isOSBinFormatMachO()) {
    if (getTriple().isArch32Bit())
      resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
    else
      resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
  } else
    resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}

void WindowsARM64TargetInfo::setDataLayout() {
  resetDataLayout(Triple.isOSBinFormatMachO()
                      ? "e-m:o-i64:64-i128:128-n32:64-S128"
                      : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
                  Triple.isOSBinFormatMachO() ? "_" : "");
}

TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}

void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}

TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}

unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size-based alignment for arm64 based on the alignment section
  // of the document below; replicate that to keep alignment consistent with
  // object files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {            // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);  // align type at least 16 bytes
  } else if (TypeSize >= 64) {      // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);   // align type at least 8 bytes
  } else if (TypeSize >= 16) {      // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);   // align type at least 4 bytes
  }

  return Align;
}
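
// For illustration: under this rule a 64-byte (512-bit) global is aligned to
// at least 16 bytes, an 8-byte global to at least 8 bytes, and a 2-byte
// global to at least 4 bytes, matching MSVC's layout.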

MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}

DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}

void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}

TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}

void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}