//===-- cpu_model.c - Support for __cpu_model builtin ------------*- C -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is based on LLVM's lib/Support/Host.cpp.
// It implements the operating system Host concept and builtin
// __cpu_model for the compiler_rt library for x86 and
// __aarch64_have_lse_atomics for AArch64.
//
//===----------------------------------------------------------------------===//

#ifndef __has_attribute
#define __has_attribute(attr) 0
#endif

#if defined(HAVE_INIT_PRIORITY)
#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__(101)))
#elif __has_attribute(__constructor__)
#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__))
#else
// FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
// this runs during initialization.
#define CONSTRUCTOR_ATTRIBUTE
#endif

#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) ||          \
     defined(_M_X64)) &&                                                      \
    (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))

#include <assert.h>

#define bool int
#define true 1
#define false 0

#ifdef _MSC_VER
#include <intrin.h>
#endif

enum VendorSignatures {
  SIG_INTEL = 0x756e6547, // Genu
  SIG_AMD = 0x68747541,   // Auth
};

enum ProcessorVendors {
  VENDOR_INTEL = 1,
  VENDOR_AMD,
  VENDOR_OTHER,
  VENDOR_MAX
};

enum ProcessorTypes {
  INTEL_BONNELL = 1,
  INTEL_CORE2,
  INTEL_COREI7,
  AMDFAM10H,
  AMDFAM15H,
  INTEL_SILVERMONT,
  INTEL_KNL,
  AMD_BTVER1,
  AMD_BTVER2,
  AMDFAM17H,
  INTEL_KNM,
  INTEL_GOLDMONT,
  INTEL_GOLDMONT_PLUS,
  INTEL_TREMONT,
  AMDFAM19H,
  ZHAOXIN_FAM7H,
  CPU_TYPE_MAX
};

enum ProcessorSubtypes {
  INTEL_COREI7_NEHALEM = 1,
  INTEL_COREI7_WESTMERE,
  INTEL_COREI7_SANDYBRIDGE,
  AMDFAM10H_BARCELONA,
  AMDFAM10H_SHANGHAI,
  AMDFAM10H_ISTANBUL,
  AMDFAM15H_BDVER1,
  AMDFAM15H_BDVER2,
  AMDFAM15H_BDVER3,
  AMDFAM15H_BDVER4,
  AMDFAM17H_ZNVER1,
  INTEL_COREI7_IVYBRIDGE,
  INTEL_COREI7_HASWELL,
  INTEL_COREI7_BROADWELL,
  INTEL_COREI7_SKYLAKE,
  INTEL_COREI7_SKYLAKE_AVX512,
  INTEL_COREI7_CANNONLAKE,
  INTEL_COREI7_ICELAKE_CLIENT,
  INTEL_COREI7_ICELAKE_SERVER,
  AMDFAM17H_ZNVER2,
  INTEL_COREI7_CASCADELAKE,
  INTEL_COREI7_TIGERLAKE,
  INTEL_COREI7_COOPERLAKE,
  INTEL_COREI7_SAPPHIRERAPIDS,
  INTEL_COREI7_ALDERLAKE,
  AMDFAM19H_ZNVER3,
  INTEL_COREI7_ROCKETLAKE,
  CPU_SUBTYPE_MAX
};

enum ProcessorFeatures {
  FEATURE_CMOV = 0,
  FEATURE_MMX,
  FEATURE_POPCNT,
  FEATURE_SSE,
  FEATURE_SSE2,
  FEATURE_SSE3,
  FEATURE_SSSE3,
  FEATURE_SSE4_1,
  FEATURE_SSE4_2,
  FEATURE_AVX,
  FEATURE_AVX2,
  FEATURE_SSE4_A,
  FEATURE_FMA4,
  FEATURE_XOP,
  FEATURE_FMA,
  FEATURE_AVX512F,
  FEATURE_BMI,
  FEATURE_BMI2,
  FEATURE_AES,
  FEATURE_PCLMUL,
  FEATURE_AVX512VL,
  FEATURE_AVX512BW,
  FEATURE_AVX512DQ,
  FEATURE_AVX512CD,
  FEATURE_AVX512ER,
  FEATURE_AVX512PF,
  FEATURE_AVX512VBMI,
  FEATURE_AVX512IFMA,
  FEATURE_AVX5124VNNIW,
  FEATURE_AVX5124FMAPS,
  FEATURE_AVX512VPOPCNTDQ,
  FEATURE_AVX512VBMI2,
  FEATURE_GFNI,
  FEATURE_VPCLMULQDQ,
  FEATURE_AVX512VNNI,
  FEATURE_AVX512BITALG,
  FEATURE_AVX512BF16,
  FEATURE_AVX512VP2INTERSECT,
  CPU_FEATURE_MAX
};

// The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max).
// Check motivated by bug reports for OpenSSL crashing on CPUs without CPUID
// support. Consequently, for i386, the presence of CPUID is checked first
// via the corresponding eflags bit.
static bool isCpuIdSupported(void) {
#if defined(__GNUC__) || defined(__clang__)
#if defined(__i386__)
  int __cpuid_supported;

  __asm__("  pushfl\n"
          "  popl   %%eax\n"
          "  movl   %%eax,%%ecx\n"
          "  xorl   $0x00200000,%%eax\n"
          "  pushl  %%eax\n"
          "  popfl\n"
          "  pushfl\n"
          "  popl   %%eax\n"
          "  movl   $0,%0\n"
          "  cmpl   %%eax,%%ecx\n"
          "  je     1f\n"
          "  movl   $1,%0\n"
          "1:"
          : "=r"(__cpuid_supported)
          :
          : "eax", "ecx");
  if (!__cpuid_supported)
    return false;
#endif
  return true;
#else
  return true;
#endif
}

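// Editorial note: only 32-bit x86 needs this probe. The ID bit (0x00200000)
// in EFLAGS can be toggled if and only if the CPU implements CPUID; x86-64
// CPUs always implement it, so the 64-bit build compiles the check away.
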
// This code is copied from lib/Support/Host.cpp.
// Changes to either file should be mirrored in the other.
/// getX86CpuIDAndInfo - Execute the specified cpuid and return the 4 values in
/// the specified arguments. If we can't run cpuid on the host, return true.
static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
                               unsigned *rECX, unsigned *rEDX) {
#if defined(__GNUC__) || defined(__clang__)
#if defined(__x86_64__)
  // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
  // FIXME: should we save this for Clang?
  __asm__("movq\t%%rbx, %%rsi\n\t"
          "cpuid\n\t"
          "xchgq\t%%rbx, %%rsi\n\t"
          : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
          : "a"(value));
  return false;
#elif defined(__i386__)
  __asm__("movl\t%%ebx, %%esi\n\t"
          "cpuid\n\t"
          "xchgl\t%%ebx, %%esi\n\t"
          : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
          : "a"(value));
  return false;
#else
  return true;
#endif
#elif defined(_MSC_VER)
  // The MSVC intrinsic is portable across x86 and x64.
  int registers[4];
  __cpuid(registers, value);
  *rEAX = registers[0];
  *rEBX = registers[1];
  *rECX = registers[2];
  *rEDX = registers[3];
  return false;
#else
  return true;
#endif
}

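// Editorial sketch (not part of the original file): leaf 0 is the classic
// use of this helper. It returns the highest supported leaf in EAX and the
// 12-byte vendor string split across EBX, EDX, ECX. SIG_INTEL above is just
// the EBX word of "GenuineIntel": 'G','e','n','u' stored little-endian is
// 0x756e6547.
//
//   unsigned MaxLeaf, Vendor, ECX, EDX;
//   if (!getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) &&
//       Vendor == SIG_INTEL) { /* GenuineIntel */ }
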
/// getX86CpuIDAndInfoEx - Execute the specified cpuid with subleaf and return
/// the 4 values in the specified arguments. If we can't run cpuid on the host,
/// return true.
static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
                                 unsigned *rEAX, unsigned *rEBX,
                                 unsigned *rECX, unsigned *rEDX) {
#if defined(__GNUC__) || defined(__clang__)
#if defined(__x86_64__)
  // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
  // FIXME: should we save this for Clang?
  __asm__("movq\t%%rbx, %%rsi\n\t"
          "cpuid\n\t"
          "xchgq\t%%rbx, %%rsi\n\t"
          : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
          : "a"(value), "c"(subleaf));
  return false;
#elif defined(__i386__)
  __asm__("movl\t%%ebx, %%esi\n\t"
          "cpuid\n\t"
          "xchgl\t%%ebx, %%esi\n\t"
          : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
          : "a"(value), "c"(subleaf));
  return false;
#else
  return true;
#endif
#elif defined(_MSC_VER)
  int registers[4];
  __cpuidex(registers, value, subleaf);
  *rEAX = registers[0];
  *rEBX = registers[1];
  *rECX = registers[2];
  *rEDX = registers[3];
  return false;
#else
  return true;
#endif
}

// Read control register 0 (XCR0). Used to detect features such as AVX.
static bool getX86XCR0(unsigned *rEAX, unsigned *rEDX) {
#if defined(__GNUC__) || defined(__clang__)
  // Check xgetbv; this uses a .byte sequence instead of the instruction
  // directly because older assemblers do not include support for xgetbv and
  // there is no easy way to conditionally compile based on the assembler used.
  __asm__(".byte 0x0f, 0x01, 0xd0" : "=a"(*rEAX), "=d"(*rEDX) : "c"(0));
  return false;
#elif defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
  unsigned long long Result = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
  *rEAX = Result;
  *rEDX = Result >> 32;
  return false;
#else
  return true;
#endif
}

static void detectX86FamilyModel(unsigned EAX, unsigned *Family,
                                 unsigned *Model) {
  *Family = (EAX >> 8) & 0xf; // Bits 8 - 11
  *Model = (EAX >> 4) & 0xf;  // Bits 4 - 7
  if (*Family == 6 || *Family == 0xf) {
    if (*Family == 0xf)
      // Examine extended family ID if family ID is F.
      *Family += (EAX >> 20) & 0xff; // Bits 20 - 27
    // Examine extended model ID if family ID is 6 or F.
    *Model += ((EAX >> 16) & 0xf) << 4; // Bits 16 - 19
  }
}

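// Editorial worked example (illustrative, not from the original source):
// EAX = 0x000906ea (leaf 1 signature of a Coffee Lake part) gives base
// family 0x6 and base model 0xe. Because the family is 6, the extended
// model field (bits 16-19 = 0x9) is prepended, yielding Family = 0x6 and
// Model = 0x9e, which the switch below treats as Kaby Lake desktop.
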
static const char *
getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
                                const unsigned *Features,
                                unsigned *Type, unsigned *Subtype) {
#define testFeature(F)                                                        \
  (Features[F / 32] & (1 << (F % 32))) != 0
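// Editorial note: features are packed 32 per word, so enum value F lives in
// word F / 32 at bit F % 32. For example, a feature with enum value 35 is
// tested as Features[1] & (1 << 3).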
  // We select CPU strings to match the code in Host.cpp, but we don't use them
  // in compiler-rt.
  const char *CPU = "";

  switch (Family) {
  case 6:
    switch (Model) {
    case 0x0f: // Intel Core 2 Duo processor, Intel Core 2 Duo mobile
               // processor, Intel Core 2 Quad processor, Intel Core 2 Quad
               // mobile processor, Intel Core 2 Extreme processor, Intel
               // Pentium Dual-Core processor, Intel Xeon processor, model
               // 0Fh. All processors are manufactured using the 65 nm process.
    case 0x16: // Intel Celeron processor model 16h. All processors are
               // manufactured using the 65 nm process.
      CPU = "core2";
      *Type = INTEL_CORE2;
      break;
    case 0x17: // Intel Core 2 Extreme processor, Intel Xeon processor, model
               // 17h. All processors are manufactured using the 45 nm process.
               //
               // 45nm: Penryn, Wolfdale, Yorkfield (XE)
    case 0x1d: // Intel Xeon processor MP. All processors are manufactured
               // using the 45 nm process.
      CPU = "penryn";
      *Type = INTEL_CORE2;
      break;
    case 0x1a: // Intel Core i7 processor and Intel Xeon processor. All
               // processors are manufactured using the 45 nm process.
    case 0x1e: // Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz.
               // As found in a Summer 2010 model iMac.
    case 0x1f:
    case 0x2e: // Nehalem EX
      CPU = "nehalem";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_NEHALEM;
      break;
    case 0x25: // Intel Core i7, laptop version.
    case 0x2c: // Intel Core i7 processor and Intel Xeon processor. All
               // processors are manufactured using the 32 nm process.
    case 0x2f: // Westmere EX
      CPU = "westmere";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_WESTMERE;
      break;
    case 0x2a: // Intel Core i7 processor. All processors are manufactured
               // using the 32 nm process.
    case 0x2d:
      CPU = "sandybridge";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_SANDYBRIDGE;
      break;
    case 0x3a:
    case 0x3e: // Ivy Bridge EP
      CPU = "ivybridge";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_IVYBRIDGE;
      break;
    // Haswell:
    case 0x3c:
    case 0x3f:
    case 0x45:
    case 0x46:
      CPU = "haswell";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_HASWELL;
      break;
    // Broadwell:
    case 0x3d:
    case 0x47:
    case 0x4f:
    case 0x56:
      CPU = "broadwell";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_BROADWELL;
      break;
    // Skylake:
    case 0x4e: // Skylake mobile
    case 0x5e: // Skylake desktop
    case 0x8e: // Kaby Lake mobile
    case 0x9e: // Kaby Lake desktop
    case 0xa5: // Comet Lake-H/S
    case 0xa6: // Comet Lake-U
      CPU = "skylake";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_SKYLAKE;
      break;
    // Rocketlake:
    case 0xa7:
      CPU = "rocketlake";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_ROCKETLAKE;
      break;
    // Skylake Xeon:
    case 0x55:
      *Type = INTEL_COREI7;
      if (testFeature(FEATURE_AVX512BF16)) {
        CPU = "cooperlake";
        *Subtype = INTEL_COREI7_COOPERLAKE;
      } else if (testFeature(FEATURE_AVX512VNNI)) {
        CPU = "cascadelake";
        *Subtype = INTEL_COREI7_CASCADELAKE;
      } else {
        CPU = "skylake-avx512";
        *Subtype = INTEL_COREI7_SKYLAKE_AVX512;
      }
      break;
    // Cannonlake:
    case 0x66:
      CPU = "cannonlake";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_CANNONLAKE;
      break;
    // Icelake:
    case 0x7d:
    case 0x7e:
      CPU = "icelake-client";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_ICELAKE_CLIENT;
      break;
    // Tigerlake:
    case 0x8c:
    case 0x8d:
      CPU = "tigerlake";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_TIGERLAKE;
      break;
    // Alderlake:
    case 0x97:
    case 0x9a:
      CPU = "alderlake";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_ALDERLAKE;
      break;
    // Icelake Xeon:
    case 0x6a:
    case 0x6c:
      CPU = "icelake-server";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_ICELAKE_SERVER;
      break;
    // Sapphire Rapids:
    case 0x8f:
      CPU = "sapphirerapids";
      *Type = INTEL_COREI7;
      *Subtype = INTEL_COREI7_SAPPHIRERAPIDS;
      break;
    case 0x1c: // Most 45 nm Intel Atom processors
    case 0x26: // 45 nm Atom Lincroft
    case 0x27: // 32 nm Atom Medfield
    case 0x35: // 32 nm Atom Midview
    case 0x36: // 32 nm Atom Midview
      CPU = "bonnell";
      *Type = INTEL_BONNELL;
      break;
    // Atom Silvermont codes from the Intel software optimization guide.
    case 0x37:
    case 0x4a:
    case 0x4d:
    case 0x5a:
    case 0x5d:
    case 0x4c: // really airmont
      CPU = "silvermont";
      *Type = INTEL_SILVERMONT;
      break;
    case 0x5c: // Apollo Lake
    case 0x5f: // Denverton
      CPU = "goldmont";
      *Type = INTEL_GOLDMONT;
      break;
    case 0x7a:
      CPU = "goldmont-plus";
      *Type = INTEL_GOLDMONT_PLUS;
      break;
    case 0x86:
      CPU = "tremont";
      *Type = INTEL_TREMONT;
      break;
    default: // Unknown family 6 CPU.
      break;
    }
    break;
  default:
    break; // Unknown.
  }

  return CPU;
}

static const char *
getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
                              const unsigned *Features,
                              unsigned *Type, unsigned *Subtype) {
  // We select CPU strings to match the code in Host.cpp, but we don't use them
  // in compiler-rt.
  const char *CPU = "";

  switch (Family) {
  case 16:
    CPU = "amdfam10";
    *Type = AMDFAM10H;
    switch (Model) {
    case 2:
      *Subtype = AMDFAM10H_BARCELONA;
      break;
    case 4:
      *Subtype = AMDFAM10H_SHANGHAI;
      break;
    case 8:
      *Subtype = AMDFAM10H_ISTANBUL;
      break;
    }
    break;
  case 20:
    CPU = "btver1";
    *Type = AMD_BTVER1;
    break;
  case 21:
    CPU = "bdver1";
    *Type = AMDFAM15H;
    if (Model >= 0x60 && Model <= 0x7f) {
      CPU = "bdver4";
      *Subtype = AMDFAM15H_BDVER4;
      break; // 60h-7Fh: Excavator
    }
    if (Model >= 0x30 && Model <= 0x3f) {
      CPU = "bdver3";
      *Subtype = AMDFAM15H_BDVER3;
      break; // 30h-3Fh: Steamroller
    }
    if ((Model >= 0x10 && Model <= 0x1f) || Model == 0x02) {
      CPU = "bdver2";
      *Subtype = AMDFAM15H_BDVER2;
      break; // 02h, 10h-1Fh: Piledriver
    }
    if (Model <= 0x0f) {
      *Subtype = AMDFAM15H_BDVER1;
      break; // 00h-0Fh: Bulldozer
    }
    break;
  case 22:
    CPU = "btver2";
    *Type = AMD_BTVER2;
    break;
  case 23:
    CPU = "znver1";
    *Type = AMDFAM17H;
    if ((Model >= 0x30 && Model <= 0x3f) || Model == 0x71) {
      CPU = "znver2";
      *Subtype = AMDFAM17H_ZNVER2;
      break; // 30h-3fh, 71h: Zen2
    }
    if (Model <= 0x0f) {
      *Subtype = AMDFAM17H_ZNVER1;
      break; // 00h-0Fh: Zen1
    }
    break;
  case 25:
    CPU = "znver3";
    *Type = AMDFAM19H;
    if (Model <= 0x0f || Model == 0x21) {
      *Subtype = AMDFAM19H_ZNVER3;
      break; // 00h-0Fh, 21h: Zen3
    }
    break;
  default:
    break; // Unknown AMD CPU.
  }

  return CPU;
}

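// Editorial worked example (illustrative, not from the original source): a
// Ryzen 3000 "Matisse" part reports base family 0xf with extended family
// 0x8, so detectX86FamilyModel() yields Family = 23 (0x17); its Model 0x71
// then selects the "znver2" / AMDFAM17H_ZNVER2 branch above.
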
static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
                                 unsigned *Features) {
  unsigned EAX, EBX;

#define setFeature(F)                                                         \
  Features[F / 32] |= 1U << (F % 32)
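// Editorial note: setFeature mirrors testFeature above. Word 0 of this
// array ends up in __cpu_model.__cpu_features[0] and word 1 in
// __cpu_features2 (see __cpu_indicator_init below), which is why features
// with enum values >= 32 are only reachable through __cpu_features2.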
  if ((EDX >> 15) & 1)
    setFeature(FEATURE_CMOV);
  if ((EDX >> 23) & 1)
    setFeature(FEATURE_MMX);
  if ((EDX >> 25) & 1)
    setFeature(FEATURE_SSE);
  if ((EDX >> 26) & 1)
    setFeature(FEATURE_SSE2);

  if ((ECX >> 0) & 1)
    setFeature(FEATURE_SSE3);
  if ((ECX >> 1) & 1)
    setFeature(FEATURE_PCLMUL);
  if ((ECX >> 9) & 1)
    setFeature(FEATURE_SSSE3);
  if ((ECX >> 12) & 1)
    setFeature(FEATURE_FMA);
  if ((ECX >> 19) & 1)
    setFeature(FEATURE_SSE4_1);
  if ((ECX >> 20) & 1)
    setFeature(FEATURE_SSE4_2);
  if ((ECX >> 23) & 1)
    setFeature(FEATURE_POPCNT);
  if ((ECX >> 25) & 1)
    setFeature(FEATURE_AES);

  // If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV
  // indicates that the AVX registers will be saved and restored on context
  // switch, then we have full AVX support.
  const unsigned AVXBits = (1 << 27) | (1 << 28);
  bool HasAVX = ((ECX & AVXBits) == AVXBits) && !getX86XCR0(&EAX, &EDX) &&
                ((EAX & 0x6) == 0x6);
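  // Editorial note: in the leaf-1 ECX word, bit 27 is OSXSAVE and bit 28 is
  // AVX, so AVXBits requires both. The XCR0 mask 0x6 covers the SSE (bit 1)
  // and AVX (bit 2) state components, i.e. the OS saves XMM and YMM
  // registers across context switches.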
#if defined(__APPLE__)
  // Darwin lazily saves the AVX512 context on first use: trust that the OS
  // will save the AVX512 context if we use AVX512 instructions, even if the
  // bit is not set right now.
  bool HasAVX512Save = true;
#else
  // AVX512 requires additional context to be saved by the OS.
  bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0);
#endif

  if (HasAVX)
    setFeature(FEATURE_AVX);

  bool HasLeaf7 =
      MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX);
  if (HasLeaf7 && ((EBX >> 3) & 1))
    setFeature(FEATURE_BMI);
  if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX)
    setFeature(FEATURE_AVX2);
  if (HasLeaf7 && ((EBX >> 8) & 1))
    setFeature(FEATURE_BMI2);
  if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512F);
  if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512DQ);
  if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512IFMA);
  if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512PF);
  if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512ER);
  if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512CD);
  if (HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512BW);
  if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512VL);

  if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512VBMI);
  if (HasLeaf7 && ((ECX >> 6) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512VBMI2);
  if (HasLeaf7 && ((ECX >> 8) & 1))
    setFeature(FEATURE_GFNI);
  if (HasLeaf7 && ((ECX >> 10) & 1) && HasAVX)
    setFeature(FEATURE_VPCLMULQDQ);
  if (HasLeaf7 && ((ECX >> 11) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512VNNI);
  if (HasLeaf7 && ((ECX >> 12) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512BITALG);
  if (HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512VPOPCNTDQ);

  if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX5124VNNIW);
  if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX5124FMAPS);
  if (HasLeaf7 && ((EDX >> 8) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512VP2INTERSECT);

  bool HasLeaf7Subleaf1 =
      MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX);
  if (HasLeaf7Subleaf1 && ((EAX >> 5) & 1) && HasAVX512Save)
    setFeature(FEATURE_AVX512BF16);

  unsigned MaxExtLevel;
  getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);

  bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 &&
                     !getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
  if (HasExtLeaf1 && ((ECX >> 6) & 1))
    setFeature(FEATURE_SSE4_A);
  if (HasExtLeaf1 && ((ECX >> 11) & 1))
    setFeature(FEATURE_XOP);
  if (HasExtLeaf1 && ((ECX >> 16) & 1))
    setFeature(FEATURE_FMA4);
}

#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
int __cpu_indicator_init(void) CONSTRUCTOR_ATTRIBUTE;

#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
struct __processor_model {
  unsigned int __cpu_vendor;
  unsigned int __cpu_type;
  unsigned int __cpu_subtype;
  unsigned int __cpu_features[1];
} __cpu_model = {0, 0, 0, {0}};

#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
unsigned int __cpu_features2 = 0;

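// Editorial sketch (not part of the original file): these globals are
// normally consumed through the compiler builtins rather than directly;
// GCC and Clang lower __builtin_cpu_is()/__builtin_cpu_supports() into
// loads from __cpu_model and __cpu_features2. A hand-written equivalent of
// __builtin_cpu_is("intel") would look roughly like this (hypothetical
// helper, relying only on the declarations above):
//
//   static int is_intel_cpu(void) {
//     __cpu_indicator_init(); // idempotent; normally run as a constructor
//     return __cpu_model.__cpu_vendor == VENDOR_INTEL;
//   }
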
// A constructor function that sets __cpu_model and __cpu_features2 to the
// right values. This needs to run only once. This constructor is given the
// highest priority and it should run before constructors without the
// priority set. However, it still runs after ifunc initializers and needs
// to be called explicitly there.
int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
  unsigned EAX, EBX, ECX, EDX;
  unsigned MaxLeaf = 5;
  unsigned Vendor;
  unsigned Model, Family;
  unsigned Features[(CPU_FEATURE_MAX + 31) / 32] = {0};

  // This function needs to run just once.
  if (__cpu_model.__cpu_vendor)
    return 0;

  if (!isCpuIdSupported() ||
      getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1) {
    __cpu_model.__cpu_vendor = VENDOR_OTHER;
    return -1;
  }

  getX86CpuIDAndInfo(1, &EAX, &EBX, &ECX, &EDX);
  detectX86FamilyModel(EAX, &Family, &Model);

  // Find available features.
  getAvailableFeatures(ECX, EDX, MaxLeaf, &Features[0]);

  assert((sizeof(Features) / sizeof(Features[0])) == 2);
  __cpu_model.__cpu_features[0] = Features[0];
  __cpu_features2 = Features[1];

  if (Vendor == SIG_INTEL) {
    // Get CPU type.
    getIntelProcessorTypeAndSubtype(Family, Model, &Features[0],
                                    &(__cpu_model.__cpu_type),
                                    &(__cpu_model.__cpu_subtype));
    __cpu_model.__cpu_vendor = VENDOR_INTEL;
  } else if (Vendor == SIG_AMD) {
    // Get CPU type.
    getAMDProcessorTypeAndSubtype(Family, Model, &Features[0],
                                  &(__cpu_model.__cpu_type),
                                  &(__cpu_model.__cpu_subtype));
    __cpu_model.__cpu_vendor = VENDOR_AMD;
  } else
    __cpu_model.__cpu_vendor = VENDOR_OTHER;

  assert(__cpu_model.__cpu_vendor < VENDOR_MAX);
  assert(__cpu_model.__cpu_type < CPU_TYPE_MAX);
  assert(__cpu_model.__cpu_subtype < CPU_SUBTYPE_MAX);

  return 0;
}

#elif defined(__aarch64__)
// LSE support detection for out-of-line atomics
// using HWCAP and Auxiliary vector
_Bool __aarch64_have_lse_atomics
    __attribute__((visibility("hidden"), nocommon));

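// Editorial note: the out-of-line atomic helpers emitted for
// -moutline-atomics (e.g. __aarch64_cas4_acq) test this flag at run time
// and use an LSE instruction such as casal when it is set, falling back to
// an ldaxr/stlxr loop otherwise.
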
#if defined(__has_include)
#if __has_include(<sys/auxv.h>)
#include <sys/auxv.h>
#ifndef AT_HWCAP
#define AT_HWCAP 16
#endif
#ifndef HWCAP_ATOMICS
#define HWCAP_ATOMICS (1 << 8)
#endif
#if defined(__ANDROID__)
#include <string.h>
#include <sys/system_properties.h>
#elif defined(__Fuchsia__)
#include <zircon/features.h>
#include <zircon/syscalls.h>
#endif

static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
#if defined(__FreeBSD__)
  unsigned long hwcap;
  int result = elf_aux_info(AT_HWCAP, &hwcap, sizeof hwcap);
  __aarch64_have_lse_atomics = result == 0 && (hwcap & HWCAP_ATOMICS) != 0;
#elif defined(__Fuchsia__)
  // This ensures the vDSO is a direct link-time dependency of anything that
  // needs this initializer code.
#pragma comment(lib, "zircon")
  uint32_t features;
  zx_status_t status = _zx_system_get_features(ZX_FEATURE_KIND_CPU, &features);
  __aarch64_have_lse_atomics =
      status == ZX_OK && (features & ZX_ARM64_FEATURE_ISA_ATOMICS) != 0;
#else
  unsigned long hwcap = getauxval(AT_HWCAP);
  _Bool result = (hwcap & HWCAP_ATOMICS) != 0;
#if defined(__ANDROID__)
  if (result) {
    char arch[PROP_VALUE_MAX];
    if (__system_property_get("ro.arch", arch) > 0 &&
        strncmp(arch, "exynos9810", sizeof("exynos9810") - 1) == 0) {
      // Some cores in the Exynos 9810 CPU are ARMv8.2 and others are ARMv8.0;
      // only the former support LSE atomics. However, the kernel in the
      // initial Android 8.0 release of Galaxy S9/S9+ devices incorrectly
      // reported the feature as being supported.
      //
      // The kernel appears to have been corrected to mark it unsupported as of
      // the Android 9.0 release on those devices, and this issue has not been
      // observed anywhere else. Thus, this workaround may be removed if
      // compiler-rt ever drops support for Android 8.0.
      result = false;
    }
  }
#endif // defined(__ANDROID__)
  __aarch64_have_lse_atomics = result;
#endif // defined(__FreeBSD__)
}
#endif // __has_include(<sys/auxv.h>)
#endif // defined(__has_include)
#endif // defined(__aarch64__)