1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <acpi/acpigen.h>
5 #include <console/console.h>
6 #include <console/debug.h>
8 #include <cpu/intel/common/common.h>
9 #include <cpu/intel/em64t101_save_state.h>
10 #include <cpu/intel/microcode.h>
11 #include <cpu/intel/smm_reloc.h>
12 #include <cpu/intel/turbo.h>
13 #include <cpu/x86/mp.h>
14 #include <cpu/x86/mtrr.h>
15 #include <intelblocks/cpulib.h>
16 #include <intelblocks/mp_init.h>
17 #include <intelpch/lockdown.h>
19 #include <soc/pci_devs.h>
21 #include <soc/smmrelocate.h>
22 #include <soc/soc_util.h>
28 static const void *microcode_patch
;
30 static const config_t
*chip_config
= NULL
;
/*
 * Report whether the SoC runs in IA_UNTRUSTED mode.
 *
 * Returns false unconditionally: IA_UNTRUSTED_MODE is not supported on
 * Cooper Lake, so the CPU is never in untrusted mode.
 */
bool cpu_soc_is_in_untrusted_mode(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Cooper Lake */
	return false;
}
/*
 * Hook called when BIOS initialization is done.
 *
 * Intentionally a no-op: IA_UNTRUSTED_MODE is not supported on Cooper Lake,
 * so there is no mode bit to set here.
 */
void cpu_soc_bios_done(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Cooper Lake */
}
43 static void xeon_configure_mca(void)
46 struct cpuid_result cpuid_regs
;
49 * Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
50 * and CPUID.(EAX=1):EDX[14]==1 MCA
52 cpuid_regs
= cpuid(1);
53 if ((cpuid_regs
.edx
& (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
56 msr
= rdmsr(IA32_MCG_CAP
);
57 if (msr
.lo
& IA32_MCG_CAP_CTL_P_MASK
) {
58 /* Enable all error logging */
59 msr
.lo
= msr
.hi
= 0xffffffff;
60 wrmsr(IA32_MCG_CTL
, msr
);
/*
 * MP-init callback providing the microcode blob and load policy.
 *
 * On server platforms the FIT mechanism only updates the microcode on
 * the BSP. Loading MCU on AP in parallel seems to fail in 10% of the cases
 * so do it serialized.
 *
 * @microcode: out — pointer to the microcode update blob found in CBFS
 *             (NULL if none).
 * @parallel:  out — 0 to request serialized loading on the APs.
 */
void get_microcode_info(const void **microcode, int *parallel)
{
	*microcode = intel_microcode_find();
	*parallel = 0; /* serialized: parallel AP loads are unreliable here */
}
77 static void each_cpu_init(struct device
*cpu
)
81 printk(BIOS_SPEW
, "%s dev: %s, cpu: %lu, apic_id: 0x%x, package_id: 0x%x\n",
82 __func__
, dev_path(cpu
), cpu_index(), cpu
->path
.apic
.apic_id
,
83 cpu
->path
.apic
.package_id
);
86 * Set HWP base feature, EPP reg enumeration, lock thermal and msr
87 * This is package level MSR. Need to check if it updates correctly on
88 * multi-socket platform.
90 msr
= rdmsr(MSR_MISC_PWR_MGMT
);
91 if (!(msr
.lo
& LOCK_MISC_PWR_MGMT_MSR
)) { /* if already locked skip update */
92 msr
.lo
= (HWP_ENUM_ENABLE
| HWP_EPP_ENUM_ENABLE
| LOCK_MISC_PWR_MGMT_MSR
|
94 wrmsr(MSR_MISC_PWR_MGMT
, msr
);
97 /* Enable Fast Strings */
98 msr
= rdmsr(IA32_MISC_ENABLE
);
99 msr
.lo
|= FAST_STRINGS_ENABLE_BIT
;
100 wrmsr(IA32_MISC_ENABLE
, msr
);
104 /* Enable speed step. */
105 if (get_turbo_state() == TURBO_ENABLED
) {
106 msr
= rdmsr(IA32_MISC_ENABLE
);
107 msr
.lo
|= SPEED_STEP_ENABLE_BIT
;
108 wrmsr(IA32_MISC_ENABLE
, msr
);
111 /* Clear out pending MCEs */
112 xeon_configure_mca();
118 /* The MSRs and CSRS have the same register layout. Use the CSRS bit definitions
119 Lock Turbo. Did FSP-S set this up??? */
120 msr
= rdmsr(MSR_TURBO_ACTIVATION_RATIO
);
121 msr
.lo
|= (TURBO_ACTIVATION_RATIO_LOCK
);
122 wrmsr(MSR_TURBO_ACTIVATION_RATIO
, msr
);
125 static struct device_operations cpu_dev_ops
= {
126 .init
= each_cpu_init
,
129 static const struct cpu_device_id cpu_table
[] = {
130 {X86_VENDOR_INTEL
, CPUID_COOPERLAKE_SP_A0
, CPUID_EXACT_MATCH_MASK
},
131 {X86_VENDOR_INTEL
, CPUID_COOPERLAKE_SP_A1
, CPUID_EXACT_MATCH_MASK
},
135 static const struct cpu_driver driver __cpu_driver
= {
137 .id_table
= cpu_table
,
140 static void set_max_turbo_freq(void)
147 /* Check for configurable TDP option */
148 if (get_turbo_state() == TURBO_ENABLED
) {
149 msr
= rdmsr(MSR_TURBO_RATIO_LIMIT
);
150 perf_ctl
.lo
= (msr
.lo
& 0xff) << 8;
151 } else if (cpu_config_tdp_levels()) {
152 /* Set to nominal TDP ratio */
153 msr
= rdmsr(MSR_CONFIG_TDP_NOMINAL
);
154 perf_ctl
.lo
= (msr
.lo
& 0xff) << 8;
156 /* Platform Info bits 15:8 give max ratio */
157 msr
= rdmsr(MSR_PLATFORM_INFO
);
158 perf_ctl
.lo
= msr
.lo
& 0xff00;
160 wrmsr(IA32_PERF_CTL
, perf_ctl
);
162 printk(BIOS_DEBUG
, "cpu: frequency set to %d\n",
163 ((perf_ctl
.lo
>> 8) & 0xff) * CONFIG_CPU_BCLK_MHZ
);
/*
 * Do essential initialization tasks before APs can be fired up.
 */
static void pre_mp_init(void)
{
	x86_setup_mtrrs_with_detect();
	/* NOTE(review): a line appears dropped here in this copy; upstream
	   also calls x86_mtrr_check() — confirm. */
}
176 static int get_thread_count(void)
178 unsigned int num_phys
= 0, num_virts
= 0;
180 cpu_read_topology(&num_phys
, &num_virts
);
181 printk(BIOS_SPEW
, "Detected %u cores and %u threads\n", num_phys
, num_virts
);
183 * Currently we do not know a way to figure out how many CPUs we have total
184 * on multi-socketed. So we pretend all sockets are populated with CPUs with
185 * same thread/core fusing.
186 * TODO: properly figure out number of active sockets OR refactor MPinit code
187 * to remove requirements of having to know total number of CPUs in advance.
189 return num_virts
* CONFIG_MAX_SOCKET
;
192 static void post_mp_init(void)
195 set_max_turbo_freq();
197 if (CONFIG(HAVE_SMI_HANDLER
)) {
199 if (get_lockdown_config() == CHIPSET_LOCKDOWN_COREBOOT
)
204 static const struct mp_ops mp_ops
= {
205 .pre_mp_init
= pre_mp_init
,
206 .get_cpu_count
= get_thread_count
,
207 .get_smm_info
= get_smm_info
,
208 .pre_mp_smm_init
= smm_southbridge_clear_state
,
209 .relocation_handler
= smm_relocation_handler
,
210 .get_microcode_info
= get_microcode_info
,
211 .post_mp_init
= post_mp_init
,
214 void mp_init_cpus(struct bus
*bus
)
216 microcode_patch
= intel_microcode_find();
218 if (!microcode_patch
)
219 printk(BIOS_ERR
, "microcode not found in CBFS!\n");
221 intel_microcode_load_unlocked(microcode_patch
);
223 /* TODO: Handle mp_init_with_smm failure? */
224 mp_init_with_smm(bus
, &mp_ops
);
227 * chip_config is used in cpu device callback. Other than cpu 0,
228 * rest of the CPU devices do not have chip_info updated.
230 chip_config
= bus
->dev
->chip_info
;