src/cpu/intel/common/common_init.c

/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpigen.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/msr.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/msr.h>
#include <types.h>

#include "common.h"
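
/*
 * Feature flags from the Thermal and Power Management CPUID leaf (6):
 * EPB is reported in ECX; the EPP and HWP flags are reported in EAX.
 */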
#define CPUID_6_ECX_EPB			(1 << 3)
#define CPUID_6_ENERGY_PERF_PREF	(1 << 10)
#define CPUID_6_HWP			(1 << 7)

/* Structured Extended Feature Flags */
#define CPUID_EXT_FEATURE_TME_SUPPORTED (1 << 13)
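
/*
 * Convenience wrapper: configure VMX according to Kconfig, then lock
 * IA32_FEATURE_CONTROL.
 */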
void set_vmx_and_lock(void)
{
	set_feature_ctrl_vmx();
	set_feature_ctrl_lock();
}
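
/*
 * Enable or disable VMX in IA32_FEATURE_CONTROL. Leaves the MSR untouched
 * if it is already locked, since writing it again would trap.
 */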
void set_feature_ctrl_vmx_arg(bool enable)
{
	msr_t msr;
	uint32_t feature_flag;

	feature_flag = cpu_get_feature_flags_ecx();
	/* Check that VMX is supported before reading or writing the MSR. */
	if (!((feature_flag & CPUID_VMX) || (feature_flag & CPUID_SMX))) {
		printk(BIOS_DEBUG, "CPU doesn't support VMX; exiting\n");
		return;
	}

	msr = rdmsr(IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked; ");
		printk(BIOS_DEBUG, "VMX status: %s\n", msr.lo & (1 << 2) ?
			"enabled" : "disabled");
		/* IA32_FEATURE_CONTROL is locked. If we set it again we get
		 * an illegal instruction.
		 */
		return;
	}

	/* The IA32_FEATURE_CONTROL MSR may initialize with random values.
	 * It must be cleared regardless of the VMX config setting.
	 */
	msr.hi = msr.lo = 0;

	if (enable) {
		msr.lo |= (1 << 2);
		if (feature_flag & CPUID_SMX) {
			msr.lo |= (1 << 1);
			if (CONFIG(INTEL_TXT)) {
				/* Enable GetSec and all GetSec leaves */
				msr.lo |= (0xff << 8);
			}
		}
	}

	wrmsr(IA32_FEATURE_CONTROL, msr);

	printk(BIOS_DEBUG, "VMX status: %s\n",
	       enable ? "enabled" : "disabled");
}

void set_feature_ctrl_vmx(void)
{
	set_feature_ctrl_vmx_arg(CONFIG(ENABLE_VMX));
}
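
/*
 * Set the IA32_FEATURE_CONTROL lock bit when SET_IA32_FC_LOCK_BIT is
 * selected. Once locked, the MSR cannot be modified until reset.
 */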
void set_feature_ctrl_lock(void)
{
	msr_t msr;
	int lock = CONFIG(SET_IA32_FC_LOCK_BIT);
	uint32_t feature_flag = cpu_get_feature_flags_ecx();

	/* Check if VMX is supported before reading or writing the MSR */
	if (!((feature_flag & CPUID_VMX) || (feature_flag & CPUID_SMX))) {
		printk(BIOS_DEBUG, "Read IA32_FEATURE_CONTROL unsupported\n");
		return;
	}

	msr = rdmsr(IA32_FEATURE_CONTROL);

	if (msr.lo & (1 << 0)) {
		printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL already locked\n");
		/* IA32_FEATURE_CONTROL is locked. If we set it again we get
		 * an illegal instruction.
		 */
		return;
	}

	if (lock) {
		/* Set lock bit */
		msr.lo |= (1 << 0);
		wrmsr(IA32_FEATURE_CONTROL, msr);
	}

	printk(BIOS_DEBUG, "IA32_FEATURE_CONTROL status: %s\n",
	       lock ? "locked" : "unlocked");
}

/*
 * Init cppc_config in a way that's appropriate for Intel
 * processors with Intel Enhanced SpeedStep Technology.
 * NOTE: version 2 is expected to be the typical use case.
 * For now this function 'punts' on version 3 and just
 * populates the additional fields with 'unsupported'.
 */
void cpu_init_cppc_config(struct cppc_config *config, u32 version)
{
	config->version = version;

	config->entries[CPPC_HIGHEST_PERF]		= CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 0, 8);
	config->entries[CPPC_NOMINAL_PERF]		= CPPC_REG_MSR(MSR_PLATFORM_INFO, 8, 8);
	config->entries[CPPC_LOWEST_NONL_PERF]		= CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 16, 8);
	config->entries[CPPC_LOWEST_PERF]		= CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 24, 8);
	config->entries[CPPC_GUARANTEED_PERF]		= CPPC_REG_MSR(IA32_HWP_CAPABILITIES, 8, 8);
	config->entries[CPPC_DESIRED_PERF]		= CPPC_REG_MSR(IA32_HWP_REQUEST, 16, 8);
	config->entries[CPPC_MIN_PERF]			= CPPC_REG_MSR(IA32_HWP_REQUEST, 0, 8);
	config->entries[CPPC_MAX_PERF]			= CPPC_REG_MSR(IA32_HWP_REQUEST, 8, 8);
	config->entries[CPPC_PERF_REDUCE_TOLERANCE]	= CPPC_UNSUPPORTED;
	config->entries[CPPC_TIME_WINDOW]		= CPPC_UNSUPPORTED;
	config->entries[CPPC_COUNTER_WRAP]		= CPPC_UNSUPPORTED;
	config->entries[CPPC_REF_PERF_COUNTER]		= CPPC_REG_MSR(IA32_MPERF, 0, 64);
	config->entries[CPPC_DELIVERED_PERF_COUNTER]	= CPPC_REG_MSR(IA32_APERF, 0, 64);
	config->entries[CPPC_PERF_LIMITED]		= CPPC_REG_MSR(IA32_HWP_STATUS, 2, 1);
	config->entries[CPPC_ENABLE]			= CPPC_REG_MSR(IA32_PM_ENABLE, 0, 1);
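
	/* The entries below are only populated for version 2 and later. */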
	if (version < 2)
		return;

	config->entries[CPPC_AUTO_SELECT]		= CPPC_DWORD(1);
	config->entries[CPPC_AUTO_ACTIVITY_WINDOW]	= CPPC_REG_MSR(IA32_HWP_REQUEST, 32, 10);
	config->entries[CPPC_PERF_PREF]			= CPPC_REG_MSR(IA32_HWP_REQUEST, 24, 8);
	config->entries[CPPC_REF_PERF]			= CPPC_UNSUPPORTED;

	if (version < 3)
		return;

	config->entries[CPPC_LOWEST_FREQ]		= CPPC_UNSUPPORTED;
	config->entries[CPPC_NOMINAL_FREQ]		= CPPC_UNSUPPORTED;
}
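
/*
 * Lock the AES-NI enable state so it cannot be changed at runtime. The MSR
 * is core-scoped, so HT siblings are skipped.
 */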
void set_aesni_lock(void)
{
	msr_t msr;

	if (!CONFIG(SET_MSR_AESNI_LOCK_BIT))
		return;

	if (!(cpu_get_feature_flags_ecx() & CPUID_AES))
		return;

	/* Only run once per core as specified in the MSR datasheet */
	if (intel_ht_sibling())
		return;

	msr = rdmsr(MSR_FEATURE_CONFIG);
	if (msr.lo & AESNI_LOCK)
		return;

	msr_set(MSR_FEATURE_CONFIG, AESNI_LOCK);
}
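
/* Clear TPR_UPDATES_DISABLE in MSR_PIC_MSG_CONTROL to allow LAPIC TPR updates. */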
void enable_lapic_tpr(void)
{
	msr_unset(MSR_PIC_MSG_CONTROL, TPR_UPDATES_DISABLE);
}
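
/* Enable Direct Cache Access (type 0) when the CPU advertises DCA support. */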
void configure_dca_cap(void)
{
	if (cpu_get_feature_flags_ecx() & CPUID_DCA)
		msr_set(IA32_PLATFORM_DCA_CAP, DCA_TYPE0_EN);
}
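
/*
 * Program the IA32_ENERGY_PERF_BIAS hint: 0 favors performance, 15 favors
 * energy savings.
 */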
void set_energy_perf_bias(u8 policy)
{
	u8 epb = policy & ENERGY_POLICY_MASK;

	if (!(cpuid_ecx(6) & CPUID_6_ECX_EPB))
		return;

	msr_unset_and_set(IA32_ENERGY_PERF_BIAS, ENERGY_POLICY_MASK, epb);
	printk(BIOS_DEBUG, "cpu: energy policy set to %u\n", epb);
}

/*
 * Check energy-performance preference and HWP capabilities from the
 * Thermal and Power Management leaf of CPUID.
 */
bool check_energy_perf_cap(void)
{
	const u32 cap = cpuid_eax(CPUID_LEAF_PM);

	if (!(cap & CPUID_6_ENERGY_PERF_PREF))
		return false;
	if (!(cap & CPUID_6_HWP))
		return false;
	return true;
}

/*
 * Instructs the CPU to use EPP hints. This means that any energy policies set
 * up in `set_energy_perf_bias` will be ignored afterwards.
 */
void enable_energy_perf_pref(void)
{
	msr_t msr = rdmsr(IA32_PM_ENABLE);
	if (!(msr.lo & HWP_ENABLE)) {
		/* Package-scoped MSR */
		printk(BIOS_DEBUG, "HWP_ENABLE: using energy-perf preference instead of energy-perf bias\n");
		msr_set(IA32_PM_ENABLE, HWP_ENABLE);
	}
}

/*
 * Set the IA32_HWP_REQUEST Energy-Performance Preference bits on the logical
 * thread. 0 is a hint to the HWP to prefer performance, and 255 is a hint to
 * prefer energy efficiency.
 * This function needs to be called when HWP_ENABLE is set.
 */
void set_energy_perf_pref(u8 pref)
{
	msr_unset_and_set(IA32_HWP_REQUEST, IA32_HWP_REQUEST_EPP_MASK,
			  (uint64_t)pref << IA32_HWP_REQUEST_EPP_SHIFT);
}
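
/* Report whether Total Memory Encryption (TME) is advertised in CPUID. */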
bool is_tme_supported(void)
{
	struct cpuid_result cpuid_regs;

	cpuid_regs = cpuid_ext(CPUID_STRUCT_EXTENDED_FEATURE_FLAGS, 0x0);
	return (cpuid_regs.ecx & CPUID_EXT_FEATURE_TME_SUPPORTED);
}

/*
 * Get the number of address bits used by Total Memory Encryption (TME).
 *
 * Returns TME_ACTIVATE[MK_TME_KEYID_BITS] (MSR 0x982 bits 35:32).
 *
 * NOTE: This function should be called after the MK-TME feature has been
 * configured in the MSRs according to the capabilities and platform
 * configuration, for instance after FSP-M.
 */
static unsigned int get_tme_keyid_bits(void)
{
	msr_t msr;

	msr = rdmsr(MSR_TME_ACTIVATE);
	return msr.hi & TME_ACTIVATE_HI_KEYID_BITS_MASK;
}
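
/*
 * Number of high physical address bits repurposed for MK-TME key IDs,
 * or 0 when TME is not supported.
 */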
unsigned int get_reserved_phys_addr_bits(void)
{
	if (!is_tme_supported())
		return 0;

	return get_tme_keyid_bits();
}