mb/starlabs/*: Correct config for SATA DEVSLP GPIO
[coreboot2.git] / src / soc / intel / alderlake / cpu.c
blob9e4ed52c447493833fe1a9fc923b9bf562b894ff
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 /*
4 * This file is created based on Intel Alder Lake Processor CPU Datasheet
5 * Document number: 619501
6 * Chapter number: 14
7 */
9 #include <console/console.h>
10 #include <device/pci.h>
11 #include <device/pci_ids.h>
12 #include <cpu/x86/mp.h>
13 #include <cpu/x86/msr.h>
14 #include <cpu/intel/microcode.h>
15 #include <cpu/intel/smm_reloc.h>
16 #include <cpu/intel/turbo.h>
17 #include <cpu/intel/common/common.h>
18 #include <fsp/api.h>
19 #include <intelblocks/cpulib.h>
20 #include <intelblocks/mp_init.h>
21 #include <intelblocks/msr.h>
22 #include <intelblocks/acpi.h>
23 #include <soc/cpu.h>
24 #include <soc/msr.h>
25 #include <soc/pci_devs.h>
26 #include <soc/soc_chip.h>
27 #include <static.h>
28 #include <types.h>
/* CPUID model numbers used to distinguish Alder Lake die variants. */
enum alderlake_model {
	ADL_MODEL_P_M = 0x9A,	/* Alder Lake-P / Alder Lake-M */
	ADL_MODEL_N = 0xBE,	/* Alder Lake-N (Atom-only; see get_soc_cpu_type) */
};
35 bool cpu_soc_is_in_untrusted_mode(void)
37 msr_t msr;
39 msr = rdmsr(MSR_BIOS_DONE);
40 return !!(msr.lo & ENABLE_IA_UNTRUSTED);
43 void cpu_soc_bios_done(void)
45 msr_t msr;
47 msr = rdmsr(MSR_BIOS_DONE);
48 msr.lo |= ENABLE_IA_UNTRUSTED;
49 wrmsr(MSR_BIOS_DONE, msr);
/*
 * Load FSP-S. Called from pre_mp_init() so the FSP-S binary is available
 * before multiprocessor initialization brings up the APs.
 */
static void soc_fsp_load(void)
{
	fsps_load();
}
57 static void configure_misc(void)
59 msr_t msr;
61 const config_t *conf = config_of_soc();
63 msr = rdmsr(IA32_MISC_ENABLE);
64 msr.lo |= (1 << 0); /* Fast String enable */
65 msr.lo |= (1 << 3); /* TM1/TM2/EMTTM enable */
66 wrmsr(IA32_MISC_ENABLE, msr);
68 /* Set EIST status */
69 cpu_set_eist(conf->eist_enable);
71 /* Disable Thermal interrupts */
72 msr.lo = 0;
73 msr.hi = 0;
74 wrmsr(IA32_THERM_INTERRUPT, msr);
76 /* Enable package critical interrupt only */
77 msr.lo = 1 << 4;
78 msr.hi = 0;
79 wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
81 /* Enable PROCHOT and Energy/Performance Bias control */
82 msr = rdmsr(MSR_POWER_CTL);
83 msr.lo |= (1 << 0); /* Enable Bi-directional PROCHOT as an input */
84 msr.lo |= (1 << 23); /* Lock it */
85 msr.lo |= (1 << 18); /* Energy/Performance Bias control */
86 wrmsr(MSR_POWER_CTL, msr);
89 enum core_type get_soc_cpu_type(void)
91 struct cpuinfo_x86 cpuinfo;
93 if (cpu_is_hybrid_supported())
94 return cpu_get_cpu_type();
96 get_fms(&cpuinfo, cpuid_eax(1));
98 if (cpuinfo.x86 == 0x6 && cpuinfo.x86_model == ADL_MODEL_N)
99 return CPUID_CORE_TYPE_INTEL_ATOM;
100 else
101 return CPUID_CORE_TYPE_INTEL_CORE;
/* Nominal (base) frequency reporting is always supported on this SoC. */
bool soc_is_nominal_freq_supported(void)
{
	return true;
}
/* All CPUs including BSP will run the following function. */
void soc_core_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	mca_configure();

	/* Route thermal/priority interrupts via the local APIC TPR */
	enable_lapic_tpr();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Emulate the ACPI PM timer for code that still relies on it */
	enable_pm_timer_emulation();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set core type in struct cpu_info */
	set_dev_core_type();

	/* Set energy policy. The "normal" EPB (6) is not suitable for Alder
	 * Lake or Raptor Lake CPUs, as this results in higher uncore power. */
	set_energy_perf_bias(7);

	const config_t *conf = config_of_soc();
	/* Set energy-performance preference, if enabled and supported */
	if (conf->enable_energy_perf_pref)
		if (check_energy_perf_cap())
			set_energy_perf_pref(conf->energy_perf_pref_value);
	/* Enable Turbo */
	enable_turbo();

	/* Activate Total Memory Encryption on cores that support it */
	if (CONFIG(INTEL_TME) && is_tme_supported())
		set_tme_core_activate();
}
/* Runs on every CPU during MP init to move its SMM handler into place. */
static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	smm_relocate();
}
153 static void pre_mp_init(void)
155 soc_fsp_load();
157 const config_t *conf = config_of_soc();
158 if (conf->enable_energy_perf_pref) {
159 if (check_energy_perf_cap())
160 enable_energy_perf_pref();
161 else
162 printk(BIOS_WARNING, "Energy Performance Preference not supported!\n");
/* Hook run after all APs have completed MP init. */
static void post_mp_init(void)
{
	/* Set Max Ratio */
	cpu_set_max_ratio();

	/*
	 * 1. Now that all APs have been relocated as well as the BSP let SMIs
	 * start flowing.
	 * 2. Skip enabling power button SMI and enable it after BS_CHIPS_INIT
	 * to avoid shutdown hang due to lack of init on certain IP in FSP-S.
	 */
	global_smi_enable_no_pwrbtn();
}
/* Callbacks consumed by mp_init_with_smm() to bring up APs and SMM. */
static const struct mp_ops mp_ops = {
	/*
	 * Skip Pre MP init MTRR programming as MTRRs are mirrored from BSP,
	 * that are set prior to ramstage.
	 * Real MTRRs programming are being done after resource allocation.
	 */
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};
/* Entry point for multiprocessor init on the given CPU bus. */
void mp_init_cpus(struct bus *cpu_bus)
{
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);

	/* Thermal throttle activation offset */
	configure_tcc_thermal_target();
}
205 enum adl_cpu_type get_adl_cpu_type(void)
207 const uint16_t adl_m_mch_ids[] = {
208 PCI_DID_INTEL_ADL_M_ID_1,
209 PCI_DID_INTEL_ADL_M_ID_2,
211 const uint16_t adl_p_mch_ids[] = {
212 PCI_DID_INTEL_ADL_P_ID_1,
213 PCI_DID_INTEL_ADL_P_ID_3,
214 PCI_DID_INTEL_ADL_P_ID_4,
215 PCI_DID_INTEL_ADL_P_ID_5,
216 PCI_DID_INTEL_ADL_P_ID_6,
217 PCI_DID_INTEL_ADL_P_ID_7,
218 PCI_DID_INTEL_ADL_P_ID_8,
219 PCI_DID_INTEL_ADL_P_ID_9,
220 PCI_DID_INTEL_ADL_P_ID_10
222 const uint16_t adl_s_mch_ids[] = {
223 PCI_DID_INTEL_ADL_S_ID_1,
224 PCI_DID_INTEL_ADL_S_ID_2,
225 PCI_DID_INTEL_ADL_S_ID_3,
226 PCI_DID_INTEL_ADL_S_ID_4,
227 PCI_DID_INTEL_ADL_S_ID_5,
228 PCI_DID_INTEL_ADL_S_ID_6,
229 PCI_DID_INTEL_ADL_S_ID_7,
230 PCI_DID_INTEL_ADL_S_ID_8,
231 PCI_DID_INTEL_ADL_S_ID_9,
232 PCI_DID_INTEL_ADL_S_ID_10,
233 PCI_DID_INTEL_ADL_S_ID_11,
234 PCI_DID_INTEL_ADL_S_ID_12,
235 PCI_DID_INTEL_ADL_S_ID_13,
236 PCI_DID_INTEL_ADL_S_ID_14,
237 PCI_DID_INTEL_ADL_S_ID_15,
240 const uint16_t adl_n_mch_ids[] = {
241 PCI_DID_INTEL_ADL_N_ID_1,
242 PCI_DID_INTEL_ADL_N_ID_2,
243 PCI_DID_INTEL_ADL_N_ID_3,
244 PCI_DID_INTEL_ADL_N_ID_4,
245 PCI_DID_INTEL_ADL_N_ID_5,
246 PCI_DID_INTEL_ADL_N_ID_6,
247 PCI_DID_INTEL_ADL_N_ID_7,
248 PCI_DID_INTEL_ADL_N_ID_8,
249 PCI_DID_INTEL_ADL_N_ID_9,
252 const uint16_t rpl_hx_mch_ids[] = {
253 PCI_DID_INTEL_RPL_HX_ID_1,
254 PCI_DID_INTEL_RPL_HX_ID_2,
255 PCI_DID_INTEL_RPL_HX_ID_3,
256 PCI_DID_INTEL_RPL_HX_ID_4,
257 PCI_DID_INTEL_RPL_HX_ID_5,
258 PCI_DID_INTEL_RPL_HX_ID_6,
259 PCI_DID_INTEL_RPL_HX_ID_7,
260 PCI_DID_INTEL_RPL_HX_ID_8,
263 const uint16_t rpl_s_mch_ids[] = {
264 PCI_DID_INTEL_RPL_S_ID_1,
265 PCI_DID_INTEL_RPL_S_ID_2,
266 PCI_DID_INTEL_RPL_S_ID_3,
267 PCI_DID_INTEL_RPL_S_ID_4,
268 PCI_DID_INTEL_RPL_S_ID_5
271 const uint16_t rpl_p_mch_ids[] = {
272 PCI_DID_INTEL_RPL_P_ID_1,
273 PCI_DID_INTEL_RPL_P_ID_2,
274 PCI_DID_INTEL_RPL_P_ID_3,
275 PCI_DID_INTEL_RPL_P_ID_4,
276 PCI_DID_INTEL_RPL_P_ID_5,
277 PCI_DID_INTEL_RPL_P_ID_6,
278 PCI_DID_INTEL_RPL_P_ID_7,
279 PCI_DID_INTEL_RPL_P_ID_8,
282 const uint16_t mchid = pci_s_read_config16(PCI_DEV(0, PCI_SLOT(SA_DEVFN_ROOT),
283 PCI_FUNC(SA_DEVFN_ROOT)),
284 PCI_DEVICE_ID);
286 for (size_t i = 0; i < ARRAY_SIZE(adl_p_mch_ids); i++) {
287 if (adl_p_mch_ids[i] == mchid)
288 return ADL_P;
291 for (size_t i = 0; i < ARRAY_SIZE(adl_m_mch_ids); i++) {
292 if (adl_m_mch_ids[i] == mchid)
293 return ADL_M;
296 for (size_t i = 0; i < ARRAY_SIZE(adl_s_mch_ids); i++) {
297 if (adl_s_mch_ids[i] == mchid)
298 return ADL_S;
301 for (size_t i = 0; i < ARRAY_SIZE(rpl_s_mch_ids); i++) {
302 if (rpl_s_mch_ids[i] == mchid)
303 return RPL_S;
306 for (size_t i = 0; i < ARRAY_SIZE(adl_n_mch_ids); i++) {
307 if (adl_n_mch_ids[i] == mchid)
308 return ADL_N;
311 for (size_t i = 0; i < ARRAY_SIZE(rpl_hx_mch_ids); i++) {
312 if (rpl_hx_mch_ids[i] == mchid)
313 return RPL_HX;
316 for (size_t i = 0; i < ARRAY_SIZE(rpl_p_mch_ids); i++) {
317 if (rpl_p_mch_ids[i] == mchid)
318 return RPL_P;
321 return ADL_UNKNOWN;
/*
 * Return the mask of S0ix low-power-mode substates the platform supports,
 * chosen by SoC variant. Returns 0 when s0ix is disabled in devicetree or
 * the variant is unknown.
 */
uint8_t get_supported_lpm_mask(void)
{
	const config_t *conf = config_of_soc();

	if (!conf->s0ix_enable)
		return 0;

	enum adl_cpu_type type = get_adl_cpu_type();
	switch (type) {
	case ADL_M: /* fallthrough */
	case ADL_N:
	case ADL_P:
	case RPL_P:
		return LPM_S0i2_0 | LPM_S0i3_0;
	case ADL_S:
	case RPL_S:
	case RPL_HX:
		return LPM_S0i2_0 | LPM_S0i2_1;
	default:
		printk(BIOS_ERR, "Unknown ADL CPU type: %d\n", type);
		return 0;
	}
}
347 int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
349 if (!CONFIG(CHROMEOS))
350 return 0;
352 * Locked RO Descriptor Implications:
354 * - A locked descriptor signals the RO binary is fixed; the FIT will load the
355 * RO's microcode during system reset.
356 * - Attempts to load newer microcode from the RW CBFS will cause a boot-time
357 * delay (~60ms, core-dependent), as the microcode must be reloaded on BSP+APs.
358 * - The kernel can load microcode updates without impacting AP FW boot time.
359 * - Skipping RW CBFS microcode loading is low-risk when the RO is locked,
360 * prioritizing fast boot times.
362 if (CONFIG(LOCK_MANAGEMENT_ENGINE) && current_patch_id)
363 return 1;
365 return 0;