/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <acpi/acpi.h>
#include <console/console.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/turbo.h>
#include <cpu/intel/common/common.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/em64t100_save_state.h>
#include <cpu/intel/smm_reloc.h>
#include <device/device.h>
#include <device/pci.h>
#include <fsp/api.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/fast_spi.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <intelblocks/sgx.h>
#include <reg_script.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>

static const struct reg_script core_msr_script[] = {
#if !CONFIG(SOC_INTEL_GEMINILAKE)
	/* Enable C-state and IO/MWAIT redirect */
	REG_MSR_WRITE(MSR_PKG_CST_CONFIG_CONTROL,
		      (PKG_C_STATE_LIMIT_C2_MASK | CORE_C_STATE_LIMIT_C10_MASK
		      | IO_MWAIT_REDIRECT_MASK | CST_CFG_LOCK_MASK)),
	/* Power Management I/O base address for I/O trapping to C-states */
	REG_MSR_WRITE(MSR_PMG_IO_CAPTURE_BASE,
		      (ACPI_PMIO_CST_REG | (PMG_IO_BASE_CST_RNG_BLK_SIZE << 16))),
	/* Disable support for MONITOR and MWAIT instructions */
	REG_MSR_RMW(IA32_MISC_ENABLE, ~MONITOR_MWAIT_DIS_MASK, 0),
#endif
	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~POWER_CTL_C1E_MASK, 0),
	REG_SCRIPT_END
};
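
/*
 * reg_script_run() executes the table above entry by entry. For
 * illustration only, the REG_MSR_RMW(MSR_POWER_CTL, ~POWER_CTL_C1E_MASK, 0)
 * entry behaves roughly like this open-coded sequence:
 *
 *	msr_t msr = rdmsr(MSR_POWER_CTL);
 *	msr.lo &= ~POWER_CTL_C1E_MASK;	// AND with mask clears the C1E bit
 *	wrmsr(MSR_POWER_CTL, msr);	// OR-value of 0 sets nothing
 */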

bool cpu_soc_is_in_untrusted_mode(void)
{
	msr_t msr;

	msr = rdmsr(MSR_POWER_MISC);
	return !!(msr.lo & ENABLE_IA_UNTRUSTED);
}

void cpu_soc_bios_done(void)
{
	msr_t msr;

	msr = rdmsr(MSR_POWER_MISC);
	msr.lo |= ENABLE_IA_UNTRUSTED;
	wrmsr(MSR_POWER_MISC, msr);
}

void soc_core_init(struct device *cpu)
{
	/* Configure Core PRMRR for SGX. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		prmrr_core_configure();

	/* Clear out pending MCEs */
	/* TODO(adurbin): Some of these banks are core vs package
	   scope. For now every CPU clears every bank. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE) ||
	    acpi_get_sleep_type() == ACPI_S5)
		mca_configure();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	/* Set virtualization based on Kconfig option */
	set_vmx_and_lock();

	/*
	 * Enable ACPI PM timer emulation, which also lets microcode know
	 * location of ACPI_BASE_ADDRESS. This also enables other features
	 * implemented in microcode.
	 */
	enable_pm_timer_emulation();

	/* Set Max Non-Turbo ratio if RAPL is disabled. */
	if (CONFIG(SOC_INTEL_DISABLE_POWER_LIMITS)) {
		cpu_set_p_state_to_max_non_turbo_ratio();
		/* Disable speed step */
		cpu_set_eist(false);
	} else if (CONFIG(SOC_INTEL_SET_MIN_CLOCK_RATIO)) {
		cpu_set_p_state_to_min_clock_ratio();
		/* Disable speed step */
		cpu_set_eist(false);
	}
}

#if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
static void soc_init_core(struct device *cpu)
{
	soc_core_init(cpu);
}

static struct device_operations cpu_dev_ops = {
	.init = soc_init_core,
};

static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_APOLLOLAKE_E0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_A0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_B0, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, CPUID_GLK_R0, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops      = &cpu_dev_ops,
	.id_table = cpu_table,
};
#endif

/*
 * MP and SMM loading initialization.
 */
struct smm_relocation_attrs {
	uint32_t smbase;
	uint32_t smrr_base;
	uint32_t smrr_mask;
};

static struct smm_relocation_attrs relo_attrs;

/*
 * Do essential initialization tasks before APs can be fired up.
 *
 * IF (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) -
 * Skip pre-MP-init MTRR programming, as the MTRRs are mirrored from the
 * BSP and were set prior to ramstage.
 * Real MTRRs are programmed after resource allocation.
 *
 * Do FSP loading before MP init to ensure that the FSP component stored in
 * the external stage cache in TSEG is not flushed out due to SMM relocation
 * during the MP init stage.
 *
 * ELSE -
 * Enable MTRRs on the BSP. This creates the MTRR solution that the APs
 * will use. Otherwise the APs would try to apply an incomplete solution
 * while the BSP is still calculating it.
 */
static void pre_mp_init(void)
{
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) {
		fsps_load();
		return;
	}
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

#if !CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
static void read_cpu_topology(unsigned int *num_phys, unsigned int *num_virt)
{
	msr_t msr;

	msr = rdmsr(MSR_CORE_THREAD_COUNT);
	*num_virt = (msr.lo >> 0) & 0xffff;
	*num_phys = (msr.lo >> 16) & 0xffff;
}
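
/*
 * Worked example (hypothetical register value): MSR_CORE_THREAD_COUNT
 * reports the thread count in bits [15:0] and the core count in bits
 * [31:16], so msr.lo == 0x00040004 decodes to *num_phys == 4 and
 * *num_virt == 4, i.e. a 4-core part without hyperthreading.
 */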

/* Find CPU topology */
int get_cpu_count(void)
{
	unsigned int num_virt_cores, num_phys_cores;

	read_cpu_topology(&num_phys_cores, &num_virt_cores);

	printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n",
	       num_phys_cores, num_virt_cores);

	return num_virt_cores;
}

void get_microcode_info(const void **microcode, int *parallel)
{
	*microcode = intel_microcode_find();
	*parallel = 1;

	/* Make sure BSP is using the microcode from cbfs */
	intel_microcode_load_unlocked(*microcode);
}
#endif

static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	uintptr_t smm_base;
	size_t smm_size;
	uintptr_t handler_base;
	size_t handler_size;

	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);

	/* Initialize global tracking state. */
	smm_region(&smm_base, &smm_size);
	smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);

	relo_attrs.smbase = smm_base;
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;

	*perm_smbase = handler_base;
	*perm_smsize = handler_size;
	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}
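
/*
 * Worked example (hypothetical layout): with smm_base == 0x7f000000 and
 * smm_size == 8 MiB (0x800000), the SMRR pair computes to
 *	smrr_base == 0x7f000000 | MTRR_TYPE_WRBACK == 0x7f000006
 *	smrr_mask == (~(0x800000 - 1) & ~0xfff) | MTRR_PHYS_MASK_VALID
 *		  == 0xff800000 | (1 << 11) == 0xff800800
 * which marks the whole TSEG region write-back cacheable inside SMM.
 */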

static void relocation_handler(int cpu, uintptr_t curr_smbase,
			       uintptr_t staggered_smbase)
{
	msr_t smrr;
	em64t100_smm_state_save_area_t *smm_state;

	/* Write the SMRR base, then the SMRR mask. */
	smrr.lo = relo_attrs.smrr_base;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_BASE, smrr);
	smrr.lo = relo_attrs.smrr_mask;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_MASK, smrr);

	/* Relocate this CPU's SMBASE via its save state area. */
	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}
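
/*
 * For context (a sketch of the flow, not additional behavior): the common
 * MP code triggers a relocation SMI on every CPU. Each CPU enters SMM at
 * the shared default SMBASE, runs this handler, and the write to
 * smm_state->smbase makes its next SMM entry use the per-CPU
 * staggered_smbase, so the CPUs' save state areas no longer overlap.
 */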

/*
 * CPU initialization recipe
 *
 * Note that no microcode update is passed to the init function. CSE updates
 * the microcode on all cores before releasing them from reset. That means that
 * the BSP and all APs will come up with the same microcode revision.
 */
static void post_mp_init(void)
{
	smm_southbridge_enable(PWRBTN_EN | GBL_EN);

	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		mp_run_on_all_cpus(sgx_configure, NULL);
}

static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	/* Clear for take-off */
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);

	/* MTRR setup happens later, so we're done here. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_CPU_MPINIT))
		return;

	/* Temporarily cache the memory-mapped boot media. */
	if (CONFIG(BOOT_DEVICE_MEMORY_MAPPED) &&
	    CONFIG(BOOT_DEVICE_SPI_FLASH))
		fast_spi_cache_bios_region();
}

#if CONFIG(SOC_INTEL_GEMINILAKE)
int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
{
	/*
	 * If PRMRR/SGX is supported, the FIT microcode load sets MSR 0x08b
	 * to a patch revision ID one less than the ID in the microcode
	 * binary. PRMRR support is indicated by MSR MTRRCAP[12]. If SGX is
	 * not enabled, check for this and avoid reloading the same
	 * microcode during CPU initialization. If SGX is enabled, the same
	 * microcode must be reloaded after the core PRMRR MSRs are
	 * programmed, as part of the SGX BIOS initialization steps.
	 */
	const msr_t mtrr_cap = rdmsr(MTRR_CAP_MSR);
	if (mtrr_cap.lo & MTRR_CAP_PRMRR) {
		const msr_t prmrr_phys_base = rdmsr(MSR_PRMRR_PHYS_BASE);
		if (prmrr_phys_base.raw) {
			/* PRMRR is programmed: do not skip the reload. */
			return 0;
		}
	}
	return current_patch_id == new_patch_id - 1;
}
#endif
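
/*
 * Worked example (hypothetical revision IDs): with PRMRR unprogrammed and
 * the FIT-loaded patch reporting 0x2b in MSR 0x08b while the cbfs binary
 * carries 0x2c, current_patch_id == new_patch_id - 1 holds and the
 * redundant reload is skipped. Once MSR_PRMRR_PHYS_BASE is non-zero, the
 * function returns 0 and the microcode is reloaded as SGX init requires.
 */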