1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <console/console.h>
5 #include <cpu/intel/common/common.h>
6 #include <cpu/intel/microcode.h>
7 #include <cpu/intel/smm_reloc.h>
8 #include <cpu/intel/turbo.h>
9 #include <cpu/x86/lapic.h>
10 #include <cpu/x86/mp.h>
11 #include <cpu/x86/msr.h>
12 #include <device/pci.h>
14 #include <intelblocks/acpi.h>
15 #include <intelblocks/cpulib.h>
16 #include <intelblocks/mp_init.h>
17 #include <intelblocks/msr.h>
20 #include <soc/pci_devs.h>
21 #include <soc/soc_chip.h>
22 #include <soc/soc_info.h>
25 bool cpu_soc_is_in_untrusted_mode(void)
29 msr
= rdmsr(MSR_BIOS_DONE
);
30 return !!(msr
.lo
& ENABLE_IA_UNTRUSTED
);
33 void cpu_soc_bios_done(void)
37 msr
= rdmsr(MSR_BIOS_DONE
);
38 msr
.lo
|= ENABLE_IA_UNTRUSTED
;
39 wrmsr(MSR_BIOS_DONE
, msr
);
42 uint8_t get_supported_lpm_mask(void)
44 return LPM_S0i2_0
| LPM_S0i2_1
| LPM_S0i2_2
;
/*
 * Pre-load FSP-S from CBFS before MP init so silicon init can run
 * without a later CBFS walk.
 * NOTE(review): body was lost in extraction; reconstructed from
 * upstream coreboot — confirm against the repository.
 */
static void soc_fsp_load(void)
{
	fsps_load();
}
52 static void configure_misc(void)
56 config_t
*conf
= (config_t
*)config_of_soc();
58 msr
= rdmsr(IA32_MISC_ENABLE
);
59 msr
.lo
|= (1 << 0); /* Fast String enable */
60 msr
.lo
|= (1 << 3); /* TM1/TM2/EMTTM enable */
61 wrmsr(IA32_MISC_ENABLE
, msr
);
64 cpu_set_eist(conf
->eist_enable
);
66 /* Disable Thermal interrupts */
69 wrmsr(IA32_THERM_INTERRUPT
, msr
);
71 /* Enable package critical interrupt only */
74 wrmsr(IA32_PACKAGE_THERM_INTERRUPT
, msr
);
76 /* Enable PROCHOT and Power Performance Platform Override */
77 msr
= rdmsr(MSR_POWER_CTL
);
78 msr
.lo
|= (1 << 0); /* Enable Bi-directional PROCHOT as an input*/
79 msr
.lo
|= (1 << 23); /* Lock it */
80 msr
.lo
|= (1 << 18); /* Power Performance Platform Override */
81 wrmsr(MSR_POWER_CTL
, msr
);
84 enum core_type
get_soc_cpu_type(void)
86 if (cpu_is_hybrid_supported())
87 return cpu_get_cpu_type();
89 return CPUID_CORE_TYPE_INTEL_CORE
;
92 bool soc_is_nominal_freq_supported(void)
97 static void enable_x2apic(void)
99 if (!CONFIG(X2APIC_LATE_WORKAROUND
))
102 enable_lapic_mode(true);
105 /* All CPUs including BSP will run the following function. */
106 void soc_core_init(struct device
*cpu
)
108 /* Clear out pending MCEs */
109 /* TODO(adurbin): This should only be done on a cold boot. Also, some
110 * of these banks are core vs package scope. For now every CPU clears
118 /* Configure Enhanced SpeedStep and Thermal Sensors */
121 enable_pm_timer_emulation();
123 /* Enable Direct Cache Access */
126 /* Set energy policy */
127 set_energy_perf_bias(ENERGY_POLICY_NORMAL
);
129 const config_t
*conf
= config_of_soc();
130 /* Set energy-performance preference */
131 if (conf
->enable_energy_perf_pref
)
132 if (check_energy_perf_cap())
133 set_energy_perf_pref(conf
->energy_perf_pref_value
);
138 /* Set core type in struct cpu_info */
141 if (CONFIG(INTEL_TME
) && is_tme_supported())
142 set_tme_core_activate();
144 if (CONFIG(DROP_CPU_FEATURE_PROGRAM_IN_FSP
)) {
145 /* Disable 3-strike error */
146 if (CONFIG(SOC_INTEL_METEORLAKE_PRE_PRODUCTION_SILICON
))
147 disable_three_strike_error();
149 disable_signaling_three_strike_event();
154 set_feature_ctrl_vmx_arg(CONFIG(ENABLE_VMX
) && !conf
->disable_vmx
);
156 /* Feature control lock configure */
157 set_feature_ctrl_lock();
/* Per-CPU SMM trigger hook for the MP init flow. */
static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	/* NOTE(review): call reconstructed from upstream coreboot — confirm. */
	smm_relocate();
}
167 static void pre_mp_init(void)
171 const config_t
*conf
= config_of_soc();
172 if (conf
->enable_energy_perf_pref
) {
173 if (check_energy_perf_cap())
174 enable_energy_perf_pref();
176 printk(BIOS_WARNING
, "Energy Performance Preference not supported!\n");
/* Runs on the BSP after all APs have completed MP init. */
static void post_mp_init(void)
{
	/* Set Max Ratio */
	/* NOTE(review): cpu_set_max_ratio() call reconstructed from upstream
	 * coreboot — confirm. */
	cpu_set_max_ratio();

	/*
	 * 1. Now that all APs have been relocated as well as the BSP let SMIs
	 * start flowing.
	 * 2. Skip enabling power button SMI and enable it after BS_CHIPS_INIT
	 * to avoid shutdown hang due to lack of init on certain IP in FSP-S.
	 */
	global_smi_enable_no_pwrbtn();
}
194 static const struct mp_ops mp_ops
= {
196 * Skip Pre MP init MTRR programming as MTRRs are mirrored from BSP,
197 * that are set prior to ramstage.
198 * Real MTRRs programming are being done after resource allocation.
200 .pre_mp_init
= pre_mp_init
,
201 .get_cpu_count
= get_cpu_count
,
202 .get_smm_info
= smm_info
,
203 .get_microcode_info
= get_microcode_info
,
204 .pre_mp_smm_init
= smm_initialize
,
205 .per_cpu_smm_trigger
= per_cpu_smm_trigger
,
206 .relocation_handler
= smm_relocation_handler
,
207 .post_mp_init
= post_mp_init
,
210 void mp_init_cpus(struct bus
*cpu_bus
)
212 if (mp_init_with_smm(cpu_bus
, &mp_ops
))
213 printk(BIOS_ERR
, "MP initialization failure.\n");
215 /* Thermal throttle activation offset */
216 configure_tcc_thermal_target();
219 int soc_skip_ucode_update(u32 current_patch_id
, u32 new_patch_id
)
221 if (!CONFIG(CHROMEOS
))
224 * Locked RO Descriptor Implications:
226 * - A locked descriptor signals the RO binary is fixed; the FIT will load the
227 * RO's microcode during system reset.
228 * - Attempts to load newer microcode from the RW CBFS will cause a boot-time
229 * delay (~60ms, core-dependent), as the microcode must be reloaded on BSP+APs.
230 * - The kernel can load microcode updates without impacting AP FW boot time.
231 * - Skipping RW CBFS microcode loading is low-risk when the RO is locked,
232 * prioritizing fast boot times.
234 if (CONFIG(LOCK_MANAGEMENT_ENGINE
) && current_patch_id
)