//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements AArch64 TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/AArch64TargetParser.h"

using namespace clang;
using namespace clang::targets;

const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};

static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
  switch (Kind) {
  case llvm::AArch64::ArchKind::ARMV9A:
  case llvm::AArch64::ArchKind::ARMV9_1A:
  case llvm::AArch64::ArchKind::ARMV9_2A:
  case llvm::AArch64::ArchKind::ARMV9_3A:
    return "9";
  default:
    return "8";
  }
}

StringRef AArch64TargetInfo::getArchProfile() const {
  switch (ArchKind) {
  case llvm::AArch64::ArchKind::ARMV8R:
    return "R";
  default:
    return "A";
  }
}
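
// Together these helpers drive the __ARM_ARCH and __ARM_ARCH_PROFILE
// predefines below; for example (illustrative), -march=armv8-r selects
// ArchKind ARMV8R and so yields __ARM_ARCH 8 with profile 'R'.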

AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal
  // type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available. Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit. It should also be possible
  // to compile the global declaration:
  //
  //   __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}

StringRef AArch64TargetInfo::getABI() const { return ABI; }

bool AArch64TargetInfo::setABI(const std::string &Name) {
  if (Name != "aapcs" && Name != "darwinpcs")
    return false;

  ABI = Name;
  return true;
}

bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
                                                 BranchProtectionInfo &BPI,
                                                 StringRef &Err) const {
  llvm::ARM::ParsedBranchProtection PBP;
  if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
    return false;

  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  return true;
}
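
// Spec carries the -mbranch-protection value, e.g. "standard", "bti",
// "pac-ret", "pac-ret+leaf" or "pac-ret+leaf+b-key" (illustrative examples);
// adding "+b-key" is what makes PBP.Key something other than "a_key" above.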

bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
  return Name == "generic" ||
         llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
}

bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}

void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}

void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
  Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
  Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}

void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + getArchProfile() + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // __ARM_FEATURE_CRYPTO is deprecated in favor of the finer-grained feature
  // macros for AES, SHA2, SHA3 and SM4.
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_7A:
    getTargetDefinesARMV87A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_8A:
    getTargetDefinesARMV88A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9A:
    getTargetDefinesARMV9A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_1A:
    getTargetDefinesARMV91A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_2A:
    getTargetDefinesARMV92A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_3A:
    getTargetDefinesARMV93A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // Allow detection of fast FMA support.
  Builder.defineMacro("__FP_FAST_FMA", "1");
  Builder.defineMacro("__FP_FAST_FMAF", "1");

  // C/C++ operators work on both VLS and VLA SVE types
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");

  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
  }
}

ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}

Optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
  if (LangOpts.VScaleMin || LangOpts.VScaleMax)
    return std::pair<unsigned, unsigned>(
        LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
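
  // With no explicit option, SVE's architectural limits apply: vector lengths
  // run from 128 to 2048 bits, i.e. vscale ranges over 1..16.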
512 if (hasFeature("sve"))
513 return std::pair<unsigned, unsigned>(1, 16);
515 return None;

bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("neon", FPU & NeonMode)
      .Cases("sve", "sve2", "sve2-bitperm", "sve2-aes", "sve2-sha3",
             "sve2-sm4", "f64mm", "f32mm", "i8mm", "bf16", FPU & SveMode)
      .Case("ls64", HasLS64)
      .Default(false);
}

bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  FPU = FPUMode;
  HasCRC = false;
  HasAES = false;
  HasSHA2 = false;
  HasSHA3 = false;
  HasSM4 = false;
  HasUnaligned = true;
  HasFullFP16 = false;
  HasDotProd = false;
  HasFP16FML = false;
  HasMTE = false;
  HasTME = false;
  HasLS64 = false;
  HasRandGen = false;
  HasMatMul = false;
  HasBFloat16 = false;
  HasSVE2 = false;
  HasSVE2AES = false;
  HasSVE2SHA3 = false;
  HasSVE2SM4 = false;
  HasSVE2BitPerm = false;
  HasMatmulFP64 = false;
  HasMatmulFP32 = false;
  HasLSE = false;
  HasMOPS = false;

  ArchKind = llvm::AArch64::ArchKind::INVALID;

  for (const auto &Feature : Features) {
    if (Feature == "+neon")
      FPU |= NeonMode;
    if (Feature == "+sve") {
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2-aes") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2AES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2BitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= SveMode;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= SveMode;
      HasMatmulFP64 = true;
    }
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+aes")
      HasAES = true;
    if (Feature == "+sha2")
      HasSHA2 = true;
    if (Feature == "+sha3") {
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+sm4")
      HasSM4 = true;
    if (Feature == "+strict-align")
      HasUnaligned = false;
    if (Feature == "+v8a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8A;
    if (Feature == "+v8.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
    if (Feature == "+v8.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
    if (Feature == "+v8.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
    if (Feature == "+v8.4a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
    if (Feature == "+v8.5a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
    if (Feature == "+v8.6a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
    if (Feature == "+v8.7a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
    if (Feature == "+v8.8a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_8A;
    if (Feature == "+v9a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9A;
    if (Feature == "+v9.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
    if (Feature == "+v9.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
    if (Feature == "+v9.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_3A;
    if (Feature == "+v8r")
      ArchKind = llvm::AArch64::ArchKind::ARMV8R;
    if (Feature == "+fullfp16")
      HasFullFP16 = true;
    if (Feature == "+dotprod")
      HasDotProd = true;
    if (Feature == "+fp16fml")
      HasFP16FML = true;
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+mops")
      HasMOPS = true;
  }

  setDataLayout();

  return true;
}
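
// A sketch of the expected flow (illustrative, not from the original file):
// for -march=armv8.2-a+sve the Features list contains "+neon", "+v8.2a" and
// "+sve", so the loop above leaves FPU == (NeonMode | SveMode),
// ArchKind == ARMV8_2A and HasFullFP16 == true.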

bool AArch64TargetInfo::hasBFloat16Type() const {
  return true;
}

TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }

TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}

const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15"
};

ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}

const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};

ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}
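
// These aliases let inline assembly refer to registers by their GCC names;
// e.g. a clobber written as "r19" (illustrative) is accepted and treated as
// "x19":
//   asm volatile("" ::: "r19");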

bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
  return false;
}
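
// How these constraints appear in user code (a minimal illustrative sketch):
//   float out, in = 1.0f;
//   asm("fabs %s0, %s1" : "=w"(out) : "w"(in)); // 'w': any FP/SIMD register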

bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
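
// Illustrative use of the 'w' operand modifier validated above: with a
// 32-bit value in an 'r' operand, %wN prints the register's w-form:
//   int a, b = 1;
//   asm("add %w0, %w1, %w1" : "=r"(a) : "r"(b));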

const char *AArch64TargetInfo::getClobbers() const { return ""; }

int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
  if (RegNo == 0)
    return 0;
  if (RegNo == 1)
    return 1;
  return -1;
}

bool AArch64TargetInfo::hasInt128Type() const { return true; }

AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64leTargetInfo::setDataLayout() {
  if (getTriple().isOSBinFormatMachO()) {
    if (getTriple().isArch32Bit())
      resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
    else
      resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
  } else
    resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}

void WindowsARM64TargetInfo::setDataLayout() {
  resetDataLayout(Triple.isOSBinFormatMachO()
                      ? "e-m:o-i64:64-i128:128-n32:64-S128"
                      : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
                  Triple.isOSBinFormatMachO() ? "_" : "");
}

TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}

void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}

TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}

unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size-based alignment for arm64 per the alignment section of the
  // document below; replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {           // TypeSize >= 64 bytes
    Align = std::max(Align, 128u); // align type at least 16 bytes
  } else if (TypeSize >= 64) {     // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);  // align type at least 8 bytes
  } else if (TypeSize >= 16) {     // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);  // align type at least 4 bytes
  }

  return Align;
}

MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}

DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}

void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}

TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}

void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}