/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * This file is created based on Intel Alder Lake Processor CPU Datasheet
 * Document number: 619501
 * Chapter number: 14
 */
#include <console/console.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <cpu/intel/common/common.h>
#include <fsp/api.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <intelblocks/acpi.h>
#include <soc/cpu.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/soc_chip.h>
#include <types.h>
enum alderlake_model {
	ADL_MODEL_P_M = 0x9A,
	ADL_MODEL_N = 0xBE,
};
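/*
 * MSR_BIOS_DONE helpers: query and set ENABLE_IA_UNTRUSTED, which marks
 * the end of trusted firmware execution. Once the bit is set, the CPU
 * treats further ring-0 code as untrusted, so it should only be set after
 * all trusted-mode initialization is complete.
 */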
bool cpu_soc_is_in_untrusted_mode(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	return !!(msr.lo & ENABLE_IA_UNTRUSTED);
}
void cpu_soc_bios_done(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	msr.lo |= ENABLE_IA_UNTRUSTED;
	wrmsr(MSR_BIOS_DONE, msr);
}
static void soc_fsp_load(void)
{
	fsps_load();
}
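/*
 * Per-thread miscellaneous MSR setup: fast strings and thermal monitor in
 * IA32_MISC_ENABLE, EIST, thermal interrupt routing, and the PROCHOT /
 * Energy-Performance Bias controls in MSR_POWER_CTL.
 */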
static void configure_misc(void)
{
	msr_t msr;

	const config_t *conf = config_of_soc();

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	/* Fast String enable */
	msr.lo |= (1 << 3);	/* TM1/TM2/EMTTM enable */
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Set EIST status */
	cpu_set_eist(conf->eist_enable);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);

	/* Enable PROCHOT and Energy/Performance Bias control */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 0);	/* Enable Bi-directional PROCHOT as an input */
	msr.lo |= (1 << 23);	/* Lock it */
	msr.lo |= (1 << 18);	/* Energy/Performance Bias control */
	wrmsr(MSR_POWER_CTL, msr);
}
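/*
 * On hybrid parts the hardware reports each core's type directly, via
 * cpu_get_cpu_type(). Alder Lake N is not hybrid and uses Atom cores
 * exclusively, so a family/model check suffices there; any other
 * non-hybrid model is treated as a Core CPU.
 */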
enum core_type get_soc_cpu_type(void)
{
	struct cpuinfo_x86 cpuinfo;

	if (cpu_is_hybrid_supported())
		return cpu_get_cpu_type();

	get_fms(&cpuinfo, cpuid_eax(1));

	if (cpuinfo.x86 == 0x6 && cpuinfo.x86_model == ADL_MODEL_N)
		return CPUID_CORE_TYPE_INTEL_ATOM;
	else
		return CPUID_CORE_TYPE_INTEL_CORE;
}
bool soc_is_nominal_freq_supported(void)
{
	return true;
}
/* All CPUs including BSP will run the following function. */
void soc_core_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	mca_configure();

	enable_lapic_tpr();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	enable_pm_timer_emulation();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set core type in struct cpu_info */
	set_dev_core_type();

	/* Set energy policy. The "normal" EPB (6) is not suitable for Alder
	 * Lake or Raptor Lake CPUs, as this results in higher uncore power. */
	set_energy_perf_bias(7);

	const config_t *conf = config_of_soc();
	/* Set energy-performance preference */
	if (conf->enable_energy_perf_pref)
		if (check_energy_perf_cap())
			set_energy_perf_pref(conf->energy_perf_pref_value);

	/* Enable Turbo */
	enable_turbo();

	if (CONFIG(INTEL_TME) && is_tme_supported())
		set_tme_core_activate();
}
static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	smm_relocate();
}
static void pre_mp_init(void)
{
	soc_fsp_load();

	const config_t *conf = config_of_soc();
	if (conf->enable_energy_perf_pref) {
		if (check_energy_perf_cap())
			enable_energy_perf_pref();
		else
			printk(BIOS_WARNING, "Energy Performance Preference not supported!\n");
	}
}
static void post_mp_init(void)
{
	/* Set Max Ratio */
	cpu_set_max_ratio();

	/*
	 * 1. Now that all APs have been relocated as well as the BSP, let SMIs
	 *    start flowing.
	 * 2. Skip enabling power button SMI and enable it after BS_CHIPS_INIT
	 *    to avoid shutdown hang due to lack of init on certain IP in FSP-S.
	 */
	global_smi_enable_no_pwrbtn();
}
static const struct mp_ops mp_ops = {
	/*
	 * Skip pre-MP-init MTRR programming, as MTRRs are mirrored from the
	 * BSP and were set prior to ramstage.
	 * The real MTRR programming is done after resource allocation.
	 */
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};
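/*
 * Entry point for multiprocessor init; expected to be invoked once from
 * the CPU cluster's init path during ramstage.
 */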
void mp_init_cpus(struct bus *cpu_bus)
{
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);

	/* Thermal throttle activation offset */
	configure_tcc_thermal_target();
}
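/*
 * Identify the CPU segment (ADL-M/N/P/S, RPL-HX/P/S) by matching the host
 * bridge device ID at SA_DEVFN_ROOT (PCI 00:00.0) against the per-segment
 * ID tables below.
 */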
enum adl_cpu_type get_adl_cpu_type(void)
{
	const uint16_t adl_m_mch_ids[] = {
		PCI_DID_INTEL_ADL_M_ID_1,
		PCI_DID_INTEL_ADL_M_ID_2,
	};

	const uint16_t adl_p_mch_ids[] = {
		PCI_DID_INTEL_ADL_P_ID_1,
		PCI_DID_INTEL_ADL_P_ID_3,
		PCI_DID_INTEL_ADL_P_ID_4,
		PCI_DID_INTEL_ADL_P_ID_5,
		PCI_DID_INTEL_ADL_P_ID_6,
		PCI_DID_INTEL_ADL_P_ID_7,
		PCI_DID_INTEL_ADL_P_ID_8,
		PCI_DID_INTEL_ADL_P_ID_9,
		PCI_DID_INTEL_ADL_P_ID_10,
	};

	const uint16_t adl_s_mch_ids[] = {
		PCI_DID_INTEL_ADL_S_ID_1,
		PCI_DID_INTEL_ADL_S_ID_2,
		PCI_DID_INTEL_ADL_S_ID_3,
		PCI_DID_INTEL_ADL_S_ID_4,
		PCI_DID_INTEL_ADL_S_ID_5,
		PCI_DID_INTEL_ADL_S_ID_6,
		PCI_DID_INTEL_ADL_S_ID_7,
		PCI_DID_INTEL_ADL_S_ID_8,
		PCI_DID_INTEL_ADL_S_ID_9,
		PCI_DID_INTEL_ADL_S_ID_10,
		PCI_DID_INTEL_ADL_S_ID_11,
		PCI_DID_INTEL_ADL_S_ID_12,
		PCI_DID_INTEL_ADL_S_ID_13,
		PCI_DID_INTEL_ADL_S_ID_14,
		PCI_DID_INTEL_ADL_S_ID_15,
	};

	const uint16_t adl_n_mch_ids[] = {
		PCI_DID_INTEL_ADL_N_ID_1,
		PCI_DID_INTEL_ADL_N_ID_2,
		PCI_DID_INTEL_ADL_N_ID_3,
		PCI_DID_INTEL_ADL_N_ID_4,
		PCI_DID_INTEL_ADL_N_ID_5,
	};

	const uint16_t rpl_hx_mch_ids[] = {
		PCI_DID_INTEL_RPL_HX_ID_1,
		PCI_DID_INTEL_RPL_HX_ID_2,
		PCI_DID_INTEL_RPL_HX_ID_3,
		PCI_DID_INTEL_RPL_HX_ID_4,
		PCI_DID_INTEL_RPL_HX_ID_5,
		PCI_DID_INTEL_RPL_HX_ID_6,
		PCI_DID_INTEL_RPL_HX_ID_7,
		PCI_DID_INTEL_RPL_HX_ID_8,
	};

	const uint16_t rpl_s_mch_ids[] = {
		PCI_DID_INTEL_RPL_S_ID_1,
		PCI_DID_INTEL_RPL_S_ID_2,
		PCI_DID_INTEL_RPL_S_ID_3,
		PCI_DID_INTEL_RPL_S_ID_4,
		PCI_DID_INTEL_RPL_S_ID_5,
	};

	const uint16_t rpl_p_mch_ids[] = {
		PCI_DID_INTEL_RPL_P_ID_1,
		PCI_DID_INTEL_RPL_P_ID_2,
		PCI_DID_INTEL_RPL_P_ID_3,
		PCI_DID_INTEL_RPL_P_ID_4,
		PCI_DID_INTEL_RPL_P_ID_5,
		PCI_DID_INTEL_RPL_P_ID_6,
		PCI_DID_INTEL_RPL_P_ID_7,
		PCI_DID_INTEL_RPL_P_ID_8,
	};

	const uint16_t mchid = pci_s_read_config16(PCI_DEV(0, PCI_SLOT(SA_DEVFN_ROOT),
							   PCI_FUNC(SA_DEVFN_ROOT)),
						   PCI_DEVICE_ID);

	for (size_t i = 0; i < ARRAY_SIZE(adl_p_mch_ids); i++) {
		if (adl_p_mch_ids[i] == mchid)
			return ADL_P;
	}

	for (size_t i = 0; i < ARRAY_SIZE(adl_m_mch_ids); i++) {
		if (adl_m_mch_ids[i] == mchid)
			return ADL_M;
	}

	for (size_t i = 0; i < ARRAY_SIZE(adl_s_mch_ids); i++) {
		if (adl_s_mch_ids[i] == mchid)
			return ADL_S;
	}

	for (size_t i = 0; i < ARRAY_SIZE(rpl_s_mch_ids); i++) {
		if (rpl_s_mch_ids[i] == mchid)
			return RPL_S;
	}

	for (size_t i = 0; i < ARRAY_SIZE(adl_n_mch_ids); i++) {
		if (adl_n_mch_ids[i] == mchid)
			return ADL_N;
	}

	for (size_t i = 0; i < ARRAY_SIZE(rpl_hx_mch_ids); i++) {
		if (rpl_hx_mch_ids[i] == mchid)
			return RPL_HX;
	}

	for (size_t i = 0; i < ARRAY_SIZE(rpl_p_mch_ids); i++) {
		if (rpl_p_mch_ids[i] == mchid)
			return RPL_P;
	}

	return ADL_UNKNOWN;
}
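/*
 * Report the supported S0ix substates for this CPU segment as a bitwise OR
 * of LPM_S0iX_Y flags: mobile parts (M/N/P) advertise S0i2.0 plus the
 * deeper S0i3.0, while S/HX parts advertise S0i2.0 and S0i2.1 only.
 * A caller can test for a specific substate, e.g.:
 *
 *	if (get_supported_lpm_mask() & LPM_S0i3_0)
 *		... deepest mobile substate is available ...
 */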
uint8_t get_supported_lpm_mask(void)
{
	enum adl_cpu_type type = get_adl_cpu_type();
	switch (type) {
	case ADL_M: /* fallthrough */
	case ADL_N:
	case ADL_P:
	case RPL_P:
		return LPM_S0i2_0 | LPM_S0i3_0;
	case ADL_S:
	case RPL_S:
	case RPL_HX:
		return LPM_S0i2_0 | LPM_S0i2_1;
	default:
		printk(BIOS_ERR, "Unknown ADL CPU type: %d\n", type);
		return 0;
	}
}
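/*
 * Decide whether a newer microcode patch in RW CBFS should be skipped.
 * Only returns 1 on ChromeOS builds where the management engine/RO
 * descriptor is locked and a patch is already loaded; the rationale is
 * spelled out in the comment below.
 */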
int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
{
	if (!CONFIG(CHROMEOS))
		return 0;
	/*
	 * Locked RO Descriptor Implications:
	 *
	 * - A locked descriptor signals the RO binary is fixed; the FIT will load the
	 *   RO's microcode during system reset.
	 * - Attempts to load newer microcode from the RW CBFS will cause a boot-time
	 *   delay (~60ms, core-dependent), as the microcode must be reloaded on BSP+APs.
	 * - The kernel can load microcode updates without impacting AP FW boot time.
	 * - Skipping RW CBFS microcode loading is low-risk when the RO is locked,
	 *   prioritizing fast boot times.
	 */
	if (CONFIG(LOCK_MANAGEMENT_ENGINE) && current_patch_id)
		return 1;

	return 0;
}