/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * Host specific cpu identification for x86.
 */

#include "qemu/osdep.h"
#include "host/cpuinfo.h"
#ifdef CONFIG_CPUID_H
# include "qemu/cpuid.h"
#endif

unsigned cpuinfo;
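
/*
 * CPUINFO_ALWAYS is always set by cpuinfo_init(), so a nonzero value
 * here means detection has already run.
 */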

/* Called both as constructor and (possibly) via other constructors. */
unsigned __attribute__((constructor)) cpuinfo_init(void)
{
    unsigned info = cpuinfo;

    if (info) {
        return info;
    }

#ifdef CONFIG_CPUID_H
    unsigned max, a, b, c, d, b7 = 0, c7 = 0;
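
    /* Leaf 0 returns the highest supported basic CPUID leaf in EAX. */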
    max = __get_cpuid_max(0, 0);

    if (max >= 7) {
        __cpuid_count(7, 0, a, b7, c7, d);
        info |= (b7 & bit_BMI ? CPUINFO_BMI1 : 0);
        info |= (b7 & bit_BMI2 ? CPUINFO_BMI2 : 0);
    }

    if (max >= 1) {
        __cpuid(1, a, b, c, d);

        info |= (d & bit_SSE2 ? CPUINFO_SSE2 : 0);
        info |= (c & bit_MOVBE ? CPUINFO_MOVBE : 0);
        info |= (c & bit_POPCNT ? CPUINFO_POPCNT : 0);
        info |= (c & bit_PCLMUL ? CPUINFO_PCLMUL : 0);

        /* Our AES support requires PSHUFB as well. */
        info |= ((c & bit_AES) && (c & bit_SSSE3) ? CPUINFO_AES : 0);

        /* For AVX features, we must check available and usable. */
        if ((c & bit_AVX) && (c & bit_OSXSAVE)) {
            unsigned bv = xgetbv_low(0);
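
            /*
             * XCR0 bit 1 is SSE/XMM state and bit 2 is AVX/YMM state;
             * the OS must enable both for AVX to be usable.
             */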
            if ((bv & 6) == 6) {
                info |= CPUINFO_AVX1;
                info |= (b7 & bit_AVX2 ? CPUINFO_AVX2 : 0);
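
                /*
                 * XCR0 bits 5..7 are the opmask, ZMM_Hi256 and Hi16_ZMM
                 * states; all three must be enabled for AVX512.
                 */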
                if ((bv & 0xe0) == 0xe0) {
                    info |= (b7 & bit_AVX512F ? CPUINFO_AVX512F : 0);
                    info |= (b7 & bit_AVX512VL ? CPUINFO_AVX512VL : 0);
                    info |= (b7 & bit_AVX512BW ? CPUINFO_AVX512BW : 0);
                    info |= (b7 & bit_AVX512DQ ? CPUINFO_AVX512DQ : 0);
                    info |= (c7 & bit_AVX512VBMI2 ? CPUINFO_AVX512VBMI2 : 0);
                }

                /*
                 * The Intel SDM has added:
                 *   Processors that enumerate support for Intel® AVX
                 *   (by setting the feature flag CPUID.01H:ECX.AVX[bit 28])
                 *   guarantee that the 16-byte memory operations performed
                 *   by the following instructions will always be carried
                 *   out atomically:
                 *   - MOVAPD, MOVAPS, and MOVDQA.
                 *   - VMOVAPD, VMOVAPS, and VMOVDQA when encoded with VEX.128.
                 *   - VMOVAPD, VMOVAPS, VMOVDQA32, and VMOVDQA64 when encoded
                 *     with EVEX.128 and k0 (masking disabled).
                 *   Note that these instructions require the linear addresses
                 *   of their memory operands to be 16-byte aligned.
                 *
                 * AMD has provided an even stronger guarantee that processors
                 * with AVX provide 16-byte atomicity for all cacheable,
                 * naturally aligned single loads and stores, e.g. MOVDQU.
                 *
                 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688
                 */
                __cpuid(0, a, b, c, d);
                if (c == signature_INTEL_ecx) {
                    info |= CPUINFO_ATOMIC_VMOVDQA;
                } else if (c == signature_AMD_ecx) {
                    info |= CPUINFO_ATOMIC_VMOVDQA | CPUINFO_ATOMIC_VMOVDQU;
                }
            }
        }
    }

    max = __get_cpuid_max(0x80000000, 0);
    if (max >= 0x80000001) {
        __cpuid(0x80000001, a, b, c, d);
        info |= (c & bit_LZCNT ? CPUINFO_LZCNT : 0);
    }
#endif

    info |= CPUINFO_ALWAYS;
    cpuinfo = info;
    return info;
}
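
/*
 * Usage sketch (illustrative only, not part of this file): callers
 * test individual feature bits after initialization.  encode_avx2()
 * and encode_generic() are hypothetical helpers, not QEMU functions.
 *
 *     if (cpuinfo_init() & CPUINFO_AVX2) {
 *         encode_avx2();
 *     } else {
 *         encode_generic();
 *     }
 */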