/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <console/console.h>
#include <cpu/x86/cr.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/em64t100_save_state.h>
#include <cpu/intel/turbo.h>
#include <cpu/intel/common/common.h>
#include <device/device.h>
#include <device/pci.h>
#include <intelblocks/cpulib.h>
#include <soc/iomap.h>
#include <soc/soc_util.h>
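
/*
 * Report whether this CPU has already been switched into untrusted mode,
 * i.e. whether ENABLE_IA_UNTRUSTED is set in MSR_POWER_MISC.
 */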
bool cpu_soc_is_in_untrusted_mode(void)
{
	msr_t msr;

	msr = rdmsr(MSR_POWER_MISC);
	return !!(msr.lo & ENABLE_IA_UNTRUSTED);
}
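
/*
 * cpu_soc_bios_done() runs once coreboot is finished with CPU setup: setting
 * ENABLE_IA_UNTRUSTED in MSR_POWER_MISC moves the core into untrusted mode,
 * restricting further access to certain privileged CPU resources.
 */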
void cpu_soc_bios_done(void)
{
	msr_t msr;

	msr = rdmsr(MSR_POWER_MISC);
	msr.lo |= ENABLE_IA_UNTRUSTED;
	wrmsr(MSR_POWER_MISC, msr);
}
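
/* SMM relocation parameters (SMBASE and the SMRR base/mask pair), filled in
   by get_smm_info() and consumed by the per-CPU relocation handler. */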
static struct smm_relocation_attrs relo_attrs;

static void dnv_configure_mca(void)
{
	msr_t msr;
	struct cpuid_result cpuid_regs;

	/* Check feature flags: CPUID.(EAX=1):EDX[7]==1 (MCE)
	 * and CPUID.(EAX=1):EDX[14]==1 (MCA). */
	cpuid_regs = cpuid(1);
	if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
		return;

	msr = rdmsr(IA32_MCG_CAP);
	if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
		/* Enable all error logging */
		msr.lo = msr.hi = 0xffffffff;
		wrmsr(IA32_MCG_CTL, msr);
	}

	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	   of these banks are core vs package scope. For now every CPU clears
	   every bank. */
	mca_configure();

	/* TODO: install a fallback MC handler for each core in case the OS does
	   not provide one. Is it really needed? */

	/* Enable the machine check exception */
	write_cr4(read_cr4() | CR4_MCE);
}
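
/*
 * Per-core thermal setup: mask both the core and package thermal interrupts,
 * then enable the thermal monitor (TM1/TM2/EMTTM) via IA32_MISC_ENABLE.
 */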
static void configure_thermal_core(void)
{
	msr_t msr;

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= THERMAL_MONITOR_ENABLE_BIT; /* TM1/TM2/EMTTM enable */
	wrmsr(IA32_MISC_ENABLE, msr);
}
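
/*
 * Per-core init hook: runs on the BSP and on every AP brought up through the
 * MP init flow below.
 */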
static void denverton_core_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_DEBUG, "Init Denverton-NS SoC cores.\n");

	/* Clear out pending MCEs */
	dnv_configure_mca();

	/* Configure Thermal Sensors */
	configure_thermal_core();

	/* Enable Fast Strings */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Enable speed step. Always ON. */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= SPEED_STEP_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);

	enable_pm_timer_emulation();
}

static struct device_operations cpu_dev_ops = {
	.init = denverton_core_init,
};

static const struct cpu_device_id cpu_table[] = {
	/* Denverton-NS A0/A1 CPUID */
	{X86_VENDOR_INTEL, CPUID_DENVERTON_A0_A1, CPUID_EXACT_MATCH_MASK},
	/* Denverton-NS B0 CPUID */
	{X86_VENDOR_INTEL, CPUID_DENVERTON_B0, CPUID_EXACT_MATCH_MASK},
	CPU_TABLE_END
};

static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};

/*
 * MP and SMM loading initialization.
 */
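
/*
 * Per-CPU SMM relocation: program the SMRR base/mask MSRs from the values
 * cached in relo_attrs, then point this CPU's save state SMBASE at its
 * staggered SMBASE so subsequent SMIs use the relocated handler.
 */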
static void relocation_handler(int cpu, uintptr_t curr_smbase,
			       uintptr_t staggered_smbase)
{
	msr_t smrr;
	em64t100_smm_state_save_area_t *smm_state;

	smrr.lo = relo_attrs.smrr_base;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_BASE, smrr);
	smrr.lo = relo_attrs.smrr_mask;
	smrr.hi = 0;
	wrmsr(IA32_SMRR_PHYS_MASK, smrr);
	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}
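
/*
 * Describe the SMM layout to the MP/SMM loader: cache SMBASE and the SMRR
 * values for relocation_handler() and report where the permanent SMM handler
 * subregion lives along with the save state size.
 */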
static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	uintptr_t smm_base;
	size_t smm_size;
	uintptr_t handler_base;
	size_t handler_size;

	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);

	/* Initialize global tracking state. */
	smm_region(&smm_base, &smm_size);
	smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);

	relo_attrs.smbase = smm_base;
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;

	*perm_smbase = handler_base;
	*perm_smsize = handler_size;
	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}
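
/*
 * Count cores by walking CPUID leaf 0xB (extended topology): for each subleaf,
 * ECX[15:8] gives the level type (2 == core) and EBX[15:0] the number of
 * logical processors at that level. Denverton-NS has no hyperthreading, so
 * the core-level count equals the CPU count.
 */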
static unsigned int detect_num_cpus_via_cpuid(void)
{
	unsigned int ecx = 0;

	while (1) {
		const struct cpuid_result leaf_b = cpuid_ext(0xb, ecx);

		/* Processor doesn't have hyperthreading, so just determine the
		   number of cores from the core level type (ecx[15:8] == 2). */
		if ((leaf_b.ecx >> 8 & 0xff) == 2)
			return leaf_b.ebx & 0xffff;

		ecx++;
	}
}

/* Assumes that FSP has already programmed the cores disabled register */
static unsigned int detect_num_cpus_via_mch(void)
{
	/* Get masks for total existing SoC cores and the core disable mask */
	const u32 core_exists_mask = MMIO32(DEFAULT_MCHBAR + MCH_BAR_CORE_EXISTS_MASK);
	const u32 core_disable_mask = MMIO32(DEFAULT_MCHBAR + MCH_BAR_CORE_DISABLE_MASK);
	const u32 active_cores_mask = ~core_disable_mask & core_exists_mask;

	/* Calculate the number of active cores */
	const unsigned int active_cores = popcnt(active_cores_mask);
	const unsigned int total_cores = popcnt(core_exists_mask);

	printk(BIOS_DEBUG, "Number of Active Cores: %u of %u total.\n",
	       active_cores, total_cores);

	return active_cores;
}

/* Find CPU topology */
int get_cpu_count(void)
{
	unsigned int num_cpus = detect_num_cpus_via_mch();

	if (num_cpus == 0 || num_cpus > CONFIG_MAX_CPUS) {
		num_cpus = detect_num_cpus_via_cpuid();
		printk(BIOS_DEBUG, "Number of Cores (CPUID): %u.\n", num_cpus);
	}

	return num_cpus;
}
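
/*
 * Request the highest available ratio via IA32_PERF_CTL: the turbo ratio
 * limit when turbo is enabled, the nominal config-TDP ratio when config-TDP
 * levels are supported, otherwise the max non-turbo ratio from
 * MSR_PLATFORM_INFO[15:8].
 */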
static void set_max_turbo_freq(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

	/* Check for configurable TDP option */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
		perf_ctl.lo = (msr.lo & 0xff) << 8;

	} else if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;

	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	wrmsr(IA32_PERF_CTL, perf_ctl);

	printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
	       ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
}

/*
 * Do essential initialization tasks before APs can be fired up.
 *
 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 *    creates the MTRR solution that the APs will use. Otherwise APs will try
 *    to apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	x86_setup_mtrrs_with_detect();
}

static void post_mp_init(void)
{
	set_max_turbo_freq();

	/*
	 * Now that all APs have been relocated as well as the BSP, let SMIs
	 * start flowing.
	 */
	global_smi_enable();
}

/*
 * CPU initialization recipe
 *
 * Note that no microcode update is passed to the init function. CSE updates
 * the microcode on all cores before releasing them from reset. That means that
 * the BSP and all APs will come up with the same microcode revision.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	/* Clear for take-off */
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);
}