1 /* SPDX-License-Identifier: GPL-2.0-only */
4 #include <boot/coreboot_tables.h>
5 #include <console/console.h>
9 #include <cpu/x86/gdt.h>
10 #include <cpu/x86/mp.h>
11 #include <cpu/x86/lapic.h>
12 #include <cpu/x86/tsc.h>
13 #include <device/device.h>
14 #include <smp/spinlock.h>
17 /* Standard macro to see if a specific flag is changeable */
18 static inline int flag_is_changeable_p(uint32_t flag
)
33 : "=&r" (f1
), "=&r" (f2
)
35 return ((f1
^f2
) & flag
) != 0;
/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide.. */
	return (unsigned char)(test >> 8) == 0x02;
}
/*
 * Detect a NexGen CPU running without BIOS hypercode new enough
 * to have CPUID. (Thanks to Herbert Oppmann)
 */
static int deep_magic_nexgen_probe(void)
{
	int ret;

	/* NOTE(review): only %ax is visible in the fragment; the divide
	 * sequence below follows the classic NexGen probe — confirm
	 * against upstream before relying on it. */
	__asm__ __volatile__ (
		"	movw	$0x5555, %%ax\n"
		"	xorw	%%dx,%%dx\n"
		"	movw	$2, %%cx\n"
		"	divw	%%cx\n"
		"	movl	$0, %%eax\n"
		"	jnz	1f\n"
		"	movl	$1, %%eax\n"
		"1:\n"
		: "=a" (ret) : : "cx", "dx");
	return ret;
}
87 /* List of CPU vendor strings along with their normalized
94 { X86_VENDOR_INTEL
, "GenuineIntel", },
95 { X86_VENDOR_CYRIX
, "CyrixInstead", },
96 { X86_VENDOR_AMD
, "AuthenticAMD", },
97 { X86_VENDOR_UMC
, "UMC UMC UMC ", },
98 { X86_VENDOR_NEXGEN
, "NexGenDriven", },
99 { X86_VENDOR_CENTAUR
, "CentaurHauls", },
100 { X86_VENDOR_RISE
, "RiseRiseRise", },
101 { X86_VENDOR_TRANSMETA
, "GenuineTMx86", },
102 { X86_VENDOR_TRANSMETA
, "TransmetaCPU", },
103 { X86_VENDOR_NSC
, "Geode by NSC", },
104 { X86_VENDOR_SIS
, "SiS SiS SiS ", },
105 { X86_VENDOR_HYGON
, "HygonGenuine", },
108 static const char *const x86_vendor_name
[] = {
109 [X86_VENDOR_INTEL
] = "Intel",
110 [X86_VENDOR_CYRIX
] = "Cyrix",
111 [X86_VENDOR_AMD
] = "AMD",
112 [X86_VENDOR_UMC
] = "UMC",
113 [X86_VENDOR_NEXGEN
] = "NexGen",
114 [X86_VENDOR_CENTAUR
] = "Centaur",
115 [X86_VENDOR_RISE
] = "Rise",
116 [X86_VENDOR_TRANSMETA
] = "Transmeta",
117 [X86_VENDOR_NSC
] = "NSC",
118 [X86_VENDOR_SIS
] = "SiS",
119 [X86_VENDOR_HYGON
] = "Hygon",
122 static const char *cpu_vendor_name(int vendor
)
125 name
= "<invalid CPU vendor>";
126 if (vendor
< ARRAY_SIZE(x86_vendor_name
) &&
127 x86_vendor_name
[vendor
] != 0)
128 name
= x86_vendor_name
[vendor
];
132 static void identify_cpu(struct device
*cpu
)
134 char vendor_name
[16];
137 vendor_name
[0] = '\0'; /* Unset */
140 /* Find the id and vendor_name */
141 if (!cpu_have_cpuid()) {
142 /* Its a 486 if we can modify the AC flag */
143 if (flag_is_changeable_p(X86_EFLAGS_AC
))
144 cpu
->device
= 0x00000400; /* 486 */
146 cpu
->device
= 0x00000300; /* 386 */
147 if (cpu
->device
== 0x00000400 && test_cyrix_52div())
148 memcpy(vendor_name
, "CyrixInstead", 13);
149 /* If we ever care we can enable cpuid here */
150 /* Detect NexGen with old hypercode */
151 else if (deep_magic_nexgen_probe())
152 memcpy(vendor_name
, "NexGenDriven", 13);
155 if (cpu_have_cpuid()) {
157 struct cpuid_result result
;
158 result
= cpuid(0x00000000);
159 cpuid_level
= result
.eax
;
160 vendor_name
[0] = (result
.ebx
>> 0) & 0xff;
161 vendor_name
[1] = (result
.ebx
>> 8) & 0xff;
162 vendor_name
[2] = (result
.ebx
>> 16) & 0xff;
163 vendor_name
[3] = (result
.ebx
>> 24) & 0xff;
164 vendor_name
[4] = (result
.edx
>> 0) & 0xff;
165 vendor_name
[5] = (result
.edx
>> 8) & 0xff;
166 vendor_name
[6] = (result
.edx
>> 16) & 0xff;
167 vendor_name
[7] = (result
.edx
>> 24) & 0xff;
168 vendor_name
[8] = (result
.ecx
>> 0) & 0xff;
169 vendor_name
[9] = (result
.ecx
>> 8) & 0xff;
170 vendor_name
[10] = (result
.ecx
>> 16) & 0xff;
171 vendor_name
[11] = (result
.ecx
>> 24) & 0xff;
172 vendor_name
[12] = '\0';
174 /* Intel-defined flags: level 0x00000001 */
175 if (cpuid_level
>= 0x00000001)
176 cpu
->device
= cpu_get_cpuid();
178 /* Have CPUID level 0 only unheard of */
179 cpu
->device
= 0x00000400;
181 cpu
->vendor
= X86_VENDOR_UNKNOWN
;
182 for (i
= 0; i
< ARRAY_SIZE(x86_vendors
); i
++) {
183 if (memcmp(vendor_name
, x86_vendors
[i
].name
, 12) == 0) {
184 cpu
->vendor
= x86_vendors
[i
].vendor
;
190 struct cpu_driver
*find_cpu_driver(struct device
*cpu
)
192 struct cpu_driver
*driver
;
193 for (driver
= _cpu_drivers
; driver
< _ecpu_drivers
; driver
++) {
194 const struct cpu_device_id
*id
;
195 for (id
= driver
->id_table
;
196 id
->vendor
!= X86_VENDOR_INVALID
; id
++) {
197 if (cpu
->vendor
== id
->vendor
&&
198 cpuid_match(cpu
->device
, id
->device
, id
->device_match_mask
))
200 if (id
->vendor
== X86_VENDOR_ANY
)
207 static void set_cpu_ops(struct device
*cpu
)
209 struct cpu_driver
*driver
= find_cpu_driver(cpu
);
210 cpu
->ops
= driver
? driver
->ops
: NULL
;
213 void cpu_initialize(void)
215 /* Because we busy wait at the printk spinlock.
216 * It is important to keep the number of printed messages
217 * from secondary cpus to a minimum, when debugging is
221 struct cpu_info
*info
;
222 struct cpuinfo_x86 c
;
226 printk(BIOS_INFO
, "Initializing CPU #%zd\n", info
->index
);
230 die("CPU: missing CPU device structure");
232 if (cpu
->initialized
)
237 /* Find what type of CPU we are dealing with */
239 printk(BIOS_DEBUG
, "CPU: vendor %s device %x\n",
240 cpu_vendor_name(cpu
->vendor
), cpu
->device
);
242 get_fms(&c
, cpu
->device
);
244 printk(BIOS_DEBUG
, "CPU: family %02x, model %02x, stepping %02x\n",
245 c
.x86
, c
.x86_model
, c
.x86_mask
);
247 /* Lookup the cpu's operations */
251 /* mask out the stepping and try again */
252 cpu
->device
-= c
.x86_mask
;
254 cpu
->device
+= c
.x86_mask
;
257 printk(BIOS_DEBUG
, "Using generic CPU ops (good)\n");
260 /* Initialize the CPU */
261 if (cpu
->ops
&& cpu
->ops
->init
) {
263 cpu
->initialized
= 1;
268 printk(BIOS_INFO
, "CPU #%zd initialized\n", info
->index
);
271 void lb_arch_add_records(struct lb_header
*header
)
274 struct lb_tsc_info
*tsc_info
;
276 /* Don't advertise a TSC rate unless it's constant. */
277 if (!tsc_constant_rate())
280 freq_khz
= tsc_freq_mhz() * 1000;
282 /* No use exposing a TSC frequency that is zero. */
286 tsc_info
= (void *)lb_new_record(header
);
287 tsc_info
->tag
= LB_TAG_TSC_INFO
;
288 tsc_info
->size
= sizeof(*tsc_info
);
289 tsc_info
->freq_khz
= freq_khz
;
292 void arch_bootstate_coreboot_exit(void)
294 /* APs are already parked by existing infrastructure. */
295 if (!CONFIG(PARALLEL_MP_AP_WORK
))
298 /* APs are waiting for work. Last thing to do is park them. */
302 /* cpu_info() looks at address 0 at the base of %gs for a pointer to struct cpu_info */
303 static struct per_cpu_segment_data segment_data
[CONFIG_MAX_CPUS
];
304 struct cpu_info cpu_infos
[CONFIG_MAX_CPUS
] = {0};
306 enum cb_err
set_cpu_info(unsigned int index
, struct device
*cpu
)
308 if (index
>= ARRAY_SIZE(cpu_infos
))
314 const struct cpu_info info
= { .cpu
= cpu
, .index
= index
};
315 cpu_infos
[index
] = info
;
316 segment_data
[index
].cpu_info
= &cpu_infos
[index
];
318 struct segment_descriptor
{
319 uint16_t segment_limit_0_15
;
320 uint16_t base_address_0_15
;
321 uint8_t base_address_16_23
;
323 uint8_t base_address_24_31
;
324 } *segment_descriptor
= (void *)&per_cpu_segment_descriptors
;
326 segment_descriptor
[index
].base_address_0_15
= (uintptr_t)&segment_data
[index
] & 0xffff;
327 segment_descriptor
[index
].base_address_16_23
= ((uintptr_t)&segment_data
[index
] >> 16) & 0xff;
328 segment_descriptor
[index
].base_address_24_31
= ((uintptr_t)&segment_data
[index
] >> 24) & 0xff;
330 const unsigned int cpu_segment
= per_cpu_segment_selector
+ (index
<< 3);
332 __asm__
__volatile__ ("mov %0, %%gs\n"