/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpigen.h>
#include <console/console.h>
#include <console/debug.h>
#include <cpu/cpu.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/cpu_ids.h>
#include <cpu/intel/em64t101_save_state.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/mtrr.h>
#include <device/pci_mmio_cfg.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/mp_init.h>
#include <intelpch/lockdown.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>
#include <soc/smmrelocate.h>
#include <soc/soc_util.h>
#include <soc/util.h>

#include "chip.h"

static const void *microcode_patch;
static const config_t *chip_config = NULL;

bool cpu_soc_is_in_untrusted_mode(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	return !!(msr.lo & ENABLE_IA_UNTRUSTED);
}

void cpu_soc_bios_done(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	msr.lo |= ENABLE_IA_UNTRUSTED;
	wrmsr(MSR_BIOS_DONE, msr);
}

static void xeon_configure_mca(void)
{
	msr_t msr;
	struct cpuid_result cpuid_regs;

	/*
	 * Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
	 * and CPUID.(EAX=1):EDX[14]==1 MCA
	 */
	cpuid_regs = cpuid(1);
	if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
		return;

	msr = rdmsr(IA32_MCG_CAP);
	if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
		/* Enable all error logging */
		msr.lo = msr.hi = 0xffffffff;
		wrmsr(IA32_MCG_CTL, msr);
	}

	/* Clear all MCA status registers and enable per-bank error reporting */
	mca_configure();
}

/*
 * On server platforms the FIT mechanism only updates the microcode on
 * the BSP. Loading the MCU on the APs in parallel seems to fail in about
 * 10% of the cases, so do it serialized.
 */
void get_microcode_info(const void **microcode, int *parallel)
{
	*microcode = intel_microcode_find();
	*parallel = 0;
}
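
/*
 * Note: the MP framework checks *parallel to decide whether the APs may
 * load the update concurrently; setting it to 0 forces the serialized
 * per-AP microcode load path.
 */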

static void each_cpu_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_SPEW, "%s dev: %s, cpu: %lu, apic_id: 0x%x, package_id: 0x%x\n",
	       __func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id,
	       cpu->path.apic.package_id);

	/*
	 * Enable PWR_PERF_PLTFRM_OVR and PROCHOT_LOCK.
	 * The value set by FSP is 20_005f, we set it to 1a_00a4_005b.
	 */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (0x16 << RESERVED1_SHIFT) | PWR_PERF_PLTFRM_OVR | PROCHOT_LOCK;
	wrmsr(MSR_POWER_CTL, msr);

	/* Set static, idle, dynamic load line impedance */
	msr = rdmsr(MSR_VR_MISC_CONFIG);
	wrmsr(MSR_VR_MISC_CONFIG, msr);

	/* Set current limitation */
	msr = rdmsr(MSR_VR_CURRENT_CONFIG);
	msr.lo |= CURRENT_LIMIT_LOCK;
	wrmsr(MSR_VR_CURRENT_CONFIG, msr);

	/* Set Turbo Ratio Limits */
	msr.lo = chip_config->turbo_ratio_limit & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT, msr);

	/* Set Turbo Ratio Limit Cores */
	msr.lo = chip_config->turbo_ratio_limit_cores & 0xffffffff;
	msr.hi = (chip_config->turbo_ratio_limit_cores >> 32) & 0xffffffff;
	wrmsr(MSR_TURBO_RATIO_LIMIT_CORES, msr);

	/* Set energy policy */
	msr = rdmsr(MSR_ENERGY_PERF_BIAS_CONFIG);
	wrmsr(MSR_ENERGY_PERF_BIAS_CONFIG, msr);

	/*
	 * Write back the package RAPL limit; read it first so that the
	 * FSP-programmed limit values are preserved.
	 */
	msr = rdmsr(PACKAGE_RAPL_LIMIT);
	wrmsr(PACKAGE_RAPL_LIMIT, msr);

	/*
	 * Set HWP base feature, EPP reg enumeration, lock thermal and msr.
	 * This is a package level MSR. Need to check if it updates correctly on
	 * multi-socket platforms.
	 */
	msr = rdmsr(MSR_MISC_PWR_MGMT);
	if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* if already locked, skip update */
		msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR
			  | LOCK_THERM_INT);
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	}
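
	/*
	 * MSR_MISC_PWR_MGMT is package-scoped: the first thread of each
	 * package performs the update above, and its siblings then find the
	 * lock bit set and skip the write.
	 */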

	/* Enable Fast Strings */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Enable Turbo */
	enable_turbo();

	/* Enable speed step. */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(IA32_MISC_ENABLE);
		msr.lo |= SPEED_STEP_ENABLE_BIT;
		wrmsr(IA32_MISC_ENABLE, msr);
	}

	/* Lock the supported Cstates */
	msr = rdmsr(MSR_PKG_CST_CONFIG_CONTROL);
	msr.lo |= CST_CFG_LOCK_MASK;
	wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/* Disable all writes to overclocking limits MSR */
	msr = rdmsr(MSR_FLEX_RATIO);
	msr.lo |= MSR_FLEX_RATIO_OC_LOCK;
	wrmsr(MSR_FLEX_RATIO, msr);

	/* Lock Power Plane Limit MSR */
	msr = rdmsr(MSR_DRAM_PLANE_POWER_LIMIT);
	msr.hi |= MSR_HI_PP_PWR_LIM_LOCK;
	wrmsr(MSR_DRAM_PLANE_POWER_LIMIT, msr);

	/* Clear out pending MCEs */
	xeon_configure_mca();

	// set_vmx_and_lock();
	/* Only lock the feature control MSR; VMX is enabled by FSP. */
	set_feature_ctrl_lock();
}

static struct device_operations cpu_dev_ops = {
	.init = each_cpu_init,
};

static const struct cpu_device_id cpu_table[] = {
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_D, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E0, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E2, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E3, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E4, CPUID_EXACT_MATCH_MASK},
	{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_Ex, CPUID_EXACT_MATCH_MASK},
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
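
/*
 * __cpu_driver places this driver in a linker-generated table; the generic
 * CPU device code matches the running CPU's CPUID against cpu_table and
 * then invokes cpu_dev_ops.init (each_cpu_init) on every thread.
 */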

static void set_max_turbo_freq(void)
{
	msr_t msr, perf_ctl;

	FUNC_ENTER();
	perf_ctl.hi = 0;

	/* Check for configurable TDP option */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	wrmsr(IA32_PERF_CTL, perf_ctl);

	printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
	       ((perf_ctl.lo >> 8) & 0xff) * CONFIG_CPU_BCLK_MHZ);
	FUNC_EXIT();
}
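
/*
 * Example, assuming the usual 100 MHz BCLK (CONFIG_CPU_BCLK_MHZ = 100):
 * a ratio of 0x1c in IA32_PERF_CTL[15:8] yields 28 * 100 = 2800 MHz.
 */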

/*
 * Do essential initialization tasks before APs can be fired up.
 */
static void pre_mp_init(void)
{
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static int get_thread_count(void)
{
	unsigned int num_phys = 0, num_virts = 0;

	cpu_read_topology(&num_phys, &num_virts);
	printk(BIOS_SPEW, "Detected %u cores and %u threads\n", num_phys, num_virts);
	return num_virts * soc_get_num_cpus();
}
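
/*
 * cpu_read_topology() reports the topology of a single socket, so the
 * thread count is scaled by soc_get_num_cpus(), the number of sockets.
 */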

static void post_mp_init(void)
{
	/* Set Max Ratio */
	set_max_turbo_freq();

	if (CONFIG(HAVE_SMI_HANDLER)) {
		global_smi_enable();
		if (get_lockdown_config() == CHIPSET_LOCKDOWN_COREBOOT)
			pmc_lock_smi();
	}
}

static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_thread_count,
#if CONFIG(HAVE_SMI_HANDLER)
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = smm_relocation_handler,
#endif
	.get_microcode_info = get_microcode_info,
	.post_mp_init = post_mp_init,
};
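
/*
 * mp_init_with_smm() drives these hooks in order: pre_mp_init and
 * get_cpu_count/get_microcode_info before the APs are started, the SMM
 * hooks during SMM relocation, and post_mp_init once every thread is up.
 */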

void mp_init_cpus(struct bus *bus)
{
	/*
	 * chip_config is used in the cpu device callback. Other than cpu 0,
	 * the rest of the CPU devices do not have chip_info updated.
	 */
	chip_config = bus->dev->chip_info;

	microcode_patch = intel_microcode_find();

	if (!microcode_patch)
		printk(BIOS_ERR, "microcode not found in CBFS!\n");

	intel_microcode_load_unlocked(microcode_patch);

	if (mp_init_with_smm(bus, &mp_ops) < 0)
		printk(BIOS_ERR, "MP initialization failure.\n");
}