1 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 #include <console/console.h>
5 #include <console/debug.h>
7 #include <cpu/intel/cpu_ids.h>
8 #include <cpu/x86/mtrr.h>
9 #include <cpu/x86/mp.h>
10 #include <cpu/intel/common/common.h>
11 #include <cpu/intel/microcode.h>
12 #include <cpu/intel/turbo.h>
13 #include <cpu/intel/smm_reloc.h>
14 #include <cpu/intel/em64t101_save_state.h>
15 #include <intelblocks/cpulib.h>
16 #include <intelpch/lockdown.h>
19 #include <soc/soc_util.h>
20 #include <soc/smmrelocate.h>
/*
 * Cached pointer to the SoC devicetree config. Populated once in
 * mp_init_cpus() from bus->dev->chip_info; per-CPU init callbacks read it
 * because CPU devices other than cpu 0 do not carry chip_info themselves.
 */
static const config_t *chip_config = NULL;
/* Report whether the CPU runs in IA_UNTRUSTED mode; always false here. */
bool cpu_soc_is_in_untrusted_mode(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Skylake */
	return false;
}
/* Hook called when BIOS init is done; nothing to do on this SoC. */
void cpu_soc_bios_done(void)
{
	/* IA_UNTRUSTED_MODE is not supported in Skylake */
}
38 static void xeon_configure_mca(void)
41 struct cpuid_result cpuid_regs
;
43 /* Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
44 * and CPUID.(EAX=1):EDX[14]==1 MCA*/
45 cpuid_regs
= cpuid(1);
46 if ((cpuid_regs
.edx
& (1<<7 | 1<<14)) != (1<<7 | 1<<14))
49 msr
= rdmsr(IA32_MCG_CAP
);
50 if (msr
.lo
& IA32_MCG_CAP_CTL_P_MASK
) {
51 /* Enable all error logging */
52 msr
.lo
= msr
.hi
= 0xffffffff;
53 wrmsr(IA32_MCG_CTL
, msr
);
56 /* TODO(adurbin): This should only be done on a cold boot. Also, some
57 of these banks are core vs package scope. For now every CPU clears
/*
 * By providing a pointer to the microcode MPinit will update the MCU
 * when necessary and skip the update if microcode already has been loaded.
 *
 * When FSP-S is provided with UPD PcdCpuMicrocodePatchBase it will update
 * the microcode. Since coreboot is able to do the same, don't set the UPD
 * and let coreboot handle microcode updates.
 *
 * FSP-S updates microcodes serialized, so do the same.
 */
static void get_microcode_info(const void **microcode, int *parallel)
{
	*microcode = intel_microcode_find();
	/* Serialized update, matching FSP-S behavior (see comment above). */
	*parallel = 0;
}
79 static void xeon_sp_core_init(struct device
*cpu
)
83 printk(BIOS_INFO
, "%s: cpu: %lu, apic_id: 0x%x, package_id: 0x%x\n",
84 __func__
, cpu_index(), cpu
->path
.apic
.apic_id
,
85 cpu
->path
.apic
.package_id
);
88 /* set MSR_PKG_CST_CONFIG_CONTROL - scope per core */
90 msr
.lo
= (PKG_CSTATE_NO_LIMIT
| CFG_LOCK_ENABLE
);
91 wrmsr(MSR_PKG_CST_CONFIG_CONTROL
, msr
);
93 /* Enable Energy Perf Bias Access, Dynamic switching and lock MSR */
94 msr
= rdmsr(MSR_POWER_CTL
);
95 msr
.lo
&= ~(POWER_CTL_C1E_MASK
| BIT2
);
96 msr
.lo
|= ENERGY_PERF_BIAS_ACCESS_ENABLE
;
97 msr
.lo
|= PWR_PERF_TUNING_DYN_SWITCHING_ENABLE
;
98 msr
.lo
|= LTR_IIO_DISABLE
;
99 msr
.lo
|= PROCHOT_LOCK_ENABLE
;
100 wrmsr(MSR_POWER_CTL
, msr
);
102 /* Set P-State ratio */
103 msr
= rdmsr(MSR_IA32_PERF_CTRL
);
104 msr
.lo
&= ~PSTATE_REQ_MASK
;
105 msr
.lo
|= (chip_config
->pstate_req_ratio
<< PSTATE_REQ_SHIFT
);
106 wrmsr(MSR_IA32_PERF_CTRL
, msr
);
109 * Set HWP base feature, EPP reg enumeration, lock thermal and msr
110 * TODO: Set LOCK_MISC_PWR_MGMT_MSR, Unexpected Exception if you
111 * lock & issue wrmsr on every thread
112 * This is package level MSR. Need to check if it updates correctly on
113 * multi-socket platform.
115 msr
= rdmsr(MSR_MISC_PWR_MGMT
);
116 if (!(msr
.lo
& LOCK_MISC_PWR_MGMT_MSR
)) { /* if already locked skip update */
117 msr
.lo
= (HWP_ENUM_ENABLE
| HWP_EPP_ENUM_ENABLE
| LOCK_MISC_PWR_MGMT_MSR
|
119 wrmsr(MSR_MISC_PWR_MGMT
, msr
);
122 msr
= rdmsr(MSR_VR_MISC_CONFIG
);
124 wrmsr(MSR_VR_MISC_CONFIG
, msr
);
126 /* Set current limit lock */
127 msr
= rdmsr(MSR_VR_CURRENT_CONFIG
);
128 msr
.lo
|= CURRENT_LIMIT_LOCK
;
129 wrmsr(MSR_VR_CURRENT_CONFIG
, msr
);
131 /* Set Turbo Ratio Limits */
132 msr
.lo
= chip_config
->turbo_ratio_limit
& 0xffffffff;
133 msr
.hi
= (chip_config
->turbo_ratio_limit
>> 32) & 0xffffffff;
134 wrmsr(MSR_TURBO_RATIO_LIMIT
, msr
);
136 /* Set Turbo Ratio Limit Cores */
137 msr
.lo
= chip_config
->turbo_ratio_limit_cores
& 0xffffffff;
138 msr
.hi
= (chip_config
->turbo_ratio_limit_cores
>> 32) & 0xffffffff;
139 wrmsr(MSR_TURBO_RATIO_LIMIT_CORES
, msr
);
141 /* set Turbo Activation ratio - scope package */
142 msr
= rdmsr(MSR_TURBO_ACTIVATION_RATIO
);
143 msr
.lo
|= MAX_NON_TURBO_RATIO
;
144 msr
.lo
|= BIT31
; /* Lock it */
145 wrmsr(MSR_TURBO_ACTIVATION_RATIO
, msr
);
148 msr
= rdmsr(MSR_CONFIG_TDP_CONTROL
);
149 msr
.lo
|= BIT31
; /* Lock it */
150 wrmsr(MSR_CONFIG_TDP_CONTROL
, msr
);
152 msr
= rdmsr(IA32_MISC_ENABLE
);
153 /* Enable Fast Strings */
154 msr
.lo
|= FAST_STRINGS_ENABLE_BIT
;
155 wrmsr(IA32_MISC_ENABLE
, msr
);
157 /* Set energy policy */
158 msr_t msr1
= rdmsr(MSR_ENERGY_PERF_BIAS_CONFIG
);
159 msr
.lo
= (msr1
.lo
& EPB_ENERGY_POLICY_MASK
) >> EPB_ENERGY_POLICY_SHIFT
;
161 wrmsr(MSR_IA32_ENERGY_PERF_BIAS
, msr
);
163 if (!intel_ht_sibling()) {
165 msr
= rdmsr(MSR_SNC_CONFIG
);
166 msr
.lo
|= BIT28
; /* Lock it */
167 wrmsr(MSR_SNC_CONFIG
, msr
);
173 configure_tcc_thermal_target();
175 /* Enable speed step. */
176 if (get_turbo_state() == TURBO_ENABLED
) {
177 msr
= rdmsr(IA32_MISC_ENABLE
);
178 msr
.lo
|= SPEED_STEP_ENABLE_BIT
;
179 wrmsr(IA32_MISC_ENABLE
, msr
);
182 /* Clear out pending MCEs */
183 xeon_configure_mca();
192 static struct device_operations cpu_dev_ops
= {
193 .init
= xeon_sp_core_init
,
196 static const struct cpu_device_id cpu_table
[] = {
197 /* Skylake-SP A0/A1 CPUID 0x506f0*/
198 {X86_VENDOR_INTEL
, CPUID_SKYLAKE_SP_A0_A1
, CPUID_EXACT_MATCH_MASK
},
199 /* Skylake-SP B0 CPUID 0x506f1*/
200 {X86_VENDOR_INTEL
, CPUID_SKYLAKE_SP_B0
, CPUID_EXACT_MATCH_MASK
},
201 /* Skylake-SP 4 CPUID 0x50654*/
202 {X86_VENDOR_INTEL
, CPUID_SKYLAKE_SP_4
, CPUID_EXACT_MATCH_MASK
},
206 static const struct cpu_driver driver __cpu_driver
= {
208 .id_table
= cpu_table
,
211 static void set_max_turbo_freq(void)
218 /* Check for configurable TDP option */
219 if (get_turbo_state() == TURBO_ENABLED
) {
220 msr
= rdmsr(MSR_TURBO_RATIO_LIMIT
);
221 perf_ctl
.lo
= (msr
.lo
& 0xff) << 8;
222 } else if (cpu_config_tdp_levels()) {
223 /* Set to nominal TDP ratio */
224 msr
= rdmsr(MSR_CONFIG_TDP_NOMINAL
);
225 perf_ctl
.lo
= (msr
.lo
& 0xff) << 8;
227 /* Platform Info bits 15:8 give max ratio */
228 msr
= rdmsr(MSR_PLATFORM_INFO
);
229 perf_ctl
.lo
= msr
.lo
& 0xff00;
231 wrmsr(IA32_PERF_CTL
, perf_ctl
);
233 printk(BIOS_DEBUG
, "cpu: frequency set to %d\n",
234 ((perf_ctl
.lo
>> 8) & 0xff) * CONFIG_CPU_BCLK_MHZ
);
239 * Do essential initialization tasks before APs can be fired up
241 * Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
242 * creates the MTRR solution that the APs will use. Otherwise APs will try to
243 * apply the incomplete solution as the BSP is calculating it.
245 static void pre_mp_init(void)
247 printk(BIOS_DEBUG
, "%s: entry\n", __func__
);
249 x86_setup_mtrrs_with_detect();
253 static void post_mp_init(void)
256 set_max_turbo_freq();
258 if (CONFIG(HAVE_SMI_HANDLER
)) {
260 if (get_lockdown_config() == CHIPSET_LOCKDOWN_COREBOOT
)
266 * CPU initialization recipe
268 static const struct mp_ops mp_ops
= {
269 .pre_mp_init
= pre_mp_init
,
270 .get_cpu_count
= get_platform_thread_count
,
271 .get_smm_info
= get_smm_info
,
272 .pre_mp_smm_init
= smm_southbridge_clear_state
,
273 .relocation_handler
= smm_relocation_handler
,
274 .get_microcode_info
= get_microcode_info
,
275 .post_mp_init
= post_mp_init
,
278 void mp_init_cpus(struct bus
*bus
)
282 const void *microcode_patch
= intel_microcode_find();
284 if (!microcode_patch
)
285 printk(BIOS_ERR
, "microcode not found in CBFS!\n");
287 intel_microcode_load_unlocked(microcode_patch
);
290 * This gets used in cpu device callback. Other than cpu 0,
291 * rest of the CPU devices do not have
292 * chip_info updated. Global chip_config is used as workaround
294 chip_config
= bus
->dev
->chip_info
;
296 /* calls src/cpu/x86/mp_init.c */
297 /* TODO: Handle mp_init_with_smm failure? */
298 mp_init_with_smm(bus
, &mp_ops
);