mb/system76/cml-u/dt: Make use of chipset devicetree
[coreboot.git] / src / soc / intel / xeon_sp / skx / cpu.c
blob009527c84ef5bff42cc69031ff55b297dadf1ee0
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 #include <console/console.h>
4 #include <console/debug.h>
5 #include <intelblocks/cpulib.h>
6 #include <cpu/cpu.h>
7 #include <cpu/intel/cpu_ids.h>
8 #include <cpu/x86/mtrr.h>
9 #include <cpu/x86/mp.h>
10 #include <cpu/intel/turbo.h>
11 #include <soc/msr.h>
12 #include <soc/soc_util.h>
13 #include <soc/smmrelocate.h>
14 #include <soc/util.h>
15 #include <assert.h>
16 #include "chip.h"
17 #include <cpu/intel/smm_reloc.h>
18 #include <cpu/intel/em64t101_save_state.h>
19 #include <types.h>
/*
 * Cached pointer to the SoC devicetree configuration (config_t from chip.h).
 * Assigned once in mp_init_cpus() from the BSP bus device's chip_info and
 * read by xeon_sp_core_init() on every CPU, because CPU devices other than
 * CPU 0 do not have chip_info populated (see comment in mp_init_cpus()).
 */
static const config_t *chip_config = NULL;
/* Report whether the SoC runs in IA_UNTRUSTED_MODE. */
bool cpu_soc_is_in_untrusted_mode(void)
{
	/* Skylake-SP does not implement IA_UNTRUSTED_MODE at all. */
	return false;
}
/*
 * Hook invoked when BIOS hand-off is done. Nothing to program here since
 * IA_UNTRUSTED_MODE is not supported on Skylake-SP.
 */
void cpu_soc_bios_done(void)
{
}
34 static void xeon_configure_mca(void)
36 msr_t msr;
37 struct cpuid_result cpuid_regs;
39 /* Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
40 * and CPUID.(EAX=1):EDX[14]==1 MCA*/
41 cpuid_regs = cpuid(1);
42 if ((cpuid_regs.edx & (1<<7 | 1<<14)) != (1<<7 | 1<<14))
43 return;
45 msr = rdmsr(IA32_MCG_CAP);
46 if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
47 /* Enable all error logging */
48 msr.lo = msr.hi = 0xffffffff;
49 wrmsr(IA32_MCG_CTL, msr);
52 /* TODO(adurbin): This should only be done on a cold boot. Also, some
53 of these banks are core vs package scope. For now every CPU clears
54 every bank. */
55 mca_configure();
58 static void xeon_sp_core_init(struct device *cpu)
60 msr_t msr;
62 printk(BIOS_INFO, "%s dev: %s, cpu: %lu, apic_id: 0x%x, package_id: 0x%x\n",
63 __func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id,
64 cpu->path.apic.package_id);
65 assert(chip_config);
67 /* set MSR_PKG_CST_CONFIG_CONTROL - scope per core*/
68 msr.hi = 0;
69 msr.lo = (PKG_CSTATE_NO_LIMIT | CFG_LOCK_ENABLE);
70 wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);
72 /* Enable Energy Perf Bias Access, Dynamic switching and lock MSR */
73 msr = rdmsr(MSR_POWER_CTL);
74 msr.lo |= (ENERGY_PERF_BIAS_ACCESS_ENABLE | PWR_PERF_TUNING_DYN_SWITCHING_ENABLE
75 | PROCHOT_LOCK_ENABLE);
76 wrmsr(MSR_POWER_CTL, msr);
78 /* Set P-State ratio */
79 msr = rdmsr(MSR_IA32_PERF_CTRL);
80 msr.lo &= ~PSTATE_REQ_MASK;
81 msr.lo |= (chip_config->pstate_req_ratio << PSTATE_REQ_SHIFT);
82 wrmsr(MSR_IA32_PERF_CTRL, msr);
85 * Set HWP base feature, EPP reg enumeration, lock thermal and msr
86 * TODO: Set LOCK_MISC_PWR_MGMT_MSR, Unexpected Exception if you
87 * lock & issue wrmsr on every thread
88 * This is package level MSR. Need to check if it updates correctly on
89 * multi-socket platform.
91 msr = rdmsr(MSR_MISC_PWR_MGMT);
92 if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* if already locked skip update */
93 msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR |
94 LOCK_THERM_INT);
95 wrmsr(MSR_MISC_PWR_MGMT, msr);
98 /* TODO MSR_VR_MISC_CONFIG */
100 /* Set current limit lock */
101 msr = rdmsr(MSR_VR_CURRENT_CONFIG);
102 msr.lo |= CURRENT_LIMIT_LOCK;
103 wrmsr(MSR_VR_CURRENT_CONFIG, msr);
105 /* Set Turbo Ratio Limits */
106 msr.lo = chip_config->turbo_ratio_limit & 0xffffffff;
107 msr.hi = (chip_config->turbo_ratio_limit >> 32) & 0xffffffff;
108 wrmsr(MSR_TURBO_RATIO_LIMIT, msr);
110 /* Set Turbo Ratio Limit Cores */
111 msr.lo = chip_config->turbo_ratio_limit_cores & 0xffffffff;
112 msr.hi = (chip_config->turbo_ratio_limit_cores >> 32) & 0xffffffff;
113 wrmsr(MSR_TURBO_RATIO_LIMIT_CORES, msr);
115 /* set Turbo Activation ratio */
116 msr.hi = 0;
117 msr = rdmsr(MSR_TURBO_ACTIVATION_RATIO);
118 msr.lo |= MAX_NON_TURBO_RATIO;
119 wrmsr(MSR_TURBO_ACTIVATION_RATIO, msr);
121 /* Enable Fast Strings */
122 msr = rdmsr(IA32_MISC_ENABLE);
123 msr.lo |= FAST_STRINGS_ENABLE_BIT;
124 wrmsr(IA32_MISC_ENABLE, msr);
126 /* Set energy policy */
127 msr_t msr1 = rdmsr(MSR_ENERGY_PERF_BIAS_CONFIG);
128 msr.lo = (msr1.lo & EPB_ENERGY_POLICY_MASK) >> EPB_ENERGY_POLICY_SHIFT;
129 msr.hi = 0;
130 wrmsr(MSR_IA32_ENERGY_PERF_BIAS, msr);
132 /* Enable Turbo */
133 enable_turbo();
135 /* Enable speed step. */
136 if (get_turbo_state() == TURBO_ENABLED) {
137 msr = rdmsr(IA32_MISC_ENABLE);
138 msr.lo |= SPEED_STEP_ENABLE_BIT;
139 wrmsr(IA32_MISC_ENABLE, msr);
142 /* Clear out pending MCEs */
143 xeon_configure_mca();
/* Device operations bound to CPUs matched via cpu_table below. */
static struct device_operations cpu_dev_ops = {
	.init = xeon_sp_core_init,
};
/* CPUID signatures of the Skylake-SP steppings this driver handles. */
static const struct cpu_device_id cpu_table[] = {
	/* Skylake-SP A0/A1 CPUID 0x506f0 */
	{X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_A0_A1, CPUID_EXACT_MATCH_MASK },
	/* Skylake-SP B0 CPUID 0x506f1 */
	{X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_B0, CPUID_EXACT_MATCH_MASK },
	/* Skylake-SP 4 CPUID 0x50654 */
	{X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_4, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};
/* Register the CPU driver so matching CPUs get cpu_dev_ops.init. */
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
165 #define CPU_BCLK 100
167 static void set_max_turbo_freq(void)
169 msr_t msr, perf_ctl;
171 FUNC_ENTER();
172 perf_ctl.hi = 0;
174 /* Check for configurable TDP option */
175 if (get_turbo_state() == TURBO_ENABLED) {
176 msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
177 perf_ctl.lo = (msr.lo & 0xff) << 8;
178 } else if (cpu_config_tdp_levels()) {
179 /* Set to nominal TDP ratio */
180 msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
181 perf_ctl.lo = (msr.lo & 0xff) << 8;
182 } else {
183 /* Platform Info bits 15:8 give max ratio */
184 msr = rdmsr(MSR_PLATFORM_INFO);
185 perf_ctl.lo = msr.lo & 0xff00;
187 wrmsr(IA32_PERF_CTL, perf_ctl);
189 printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
190 ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
191 FUNC_EXIT();
/*
 * Do essential initialization tasks before APs can be fired up.
 *
 * Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	printk(BIOS_DEBUG, "%s: entry\n", __func__);

	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}
/* Runs on the BSP after all APs have been initialized. */
static void post_mp_init(void)
{
	/* Set Max Ratio */
	set_max_turbo_freq();

	/* Enable SMIs only when a handler is actually built in. */
	if (CONFIG(HAVE_SMI_HANDLER))
		global_smi_enable();
}
/*
 * CPU initialization recipe.
 *
 * Note that no microcode update is passed to the init function. CSE updates
 * the microcode on all cores before releasing them from reset. That means that
 * the BSP and all APs will come up with the same microcode revision.
 */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_platform_thread_count,
	.get_smm_info = get_smm_info,
	.pre_mp_smm_init = smm_southbridge_clear_state,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};
/* Entry point for multiprocessor init on this SoC. */
void mp_init_cpus(struct bus *bus)
{
	FUNC_ENTER();

	/*
	 * This gets used in cpu device callback. Other than cpu 0,
	 * rest of the CPU devices do not have
	 * chip_info updated. Global chip_config is used as workaround.
	 * Must be set before mp_init_with_smm() runs xeon_sp_core_init().
	 */
	chip_config = bus->dev->chip_info;

	config_reset_cpl3_csrs();

	/* calls src/cpu/x86/mp_init.c */
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(bus, &mp_ops);

	FUNC_EXIT();
}