arch/x86/kernel/acpi/cppc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * cppc.c: CPPC Interface for x86
 * Copyright (c) 2016, Intel Corporation.
 */

#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/topology.h>

#define CPPC_HIGHEST_PERF_PERFORMANCE	196
#define CPPC_HIGHEST_PERF_PREFCORE	166

enum amd_pref_core {
	AMD_PREF_CORE_UNKNOWN = 0,
	AMD_PREF_CORE_SUPPORTED,
	AMD_PREF_CORE_UNSUPPORTED,
};
static enum amd_pref_core amd_pref_core_detected;
static u64 boost_numerator;

/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */

bool cpc_supported_by_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
		if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) ||
		    (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
			return true;
		else if (boot_cpu_data.x86 == 0x17 &&
			 boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f)
			return true;
		return boot_cpu_has(X86_FEATURE_CPPC);
	}
	return false;
}

bool cpc_ffh_supported(void)
{
	return true;
}

int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		*val &= mask;
		*val >>= reg->bit_offset;
	}
	return err;
}
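
/*
 * Example (hypothetical register layout, not taken from a real _CPC table):
 * for a register with bit_offset = 8 and bit_width = 8, the mask computed
 * above is GENMASK_ULL(15, 8). cpc_read_ffh() isolates bits 15:8 of the MSR
 * and shifts them down so the caller sees the field value in bits 7:0, while
 * cpc_write_ffh() below performs the inverse read-modify-write, leaving the
 * other MSR bits untouched.
 */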

int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	u64 rd_val;
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		val <<= reg->bit_offset;
		val &= mask;
		rd_val &= ~mask;
		rd_val |= val;
		err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
	}
	return err;
}

static void amd_set_max_freq_ratio(void)
{
	struct cppc_perf_caps perf_caps;
	u64 numerator, nominal_perf;
	u64 perf_ratio;
	int rc;

	rc = cppc_get_perf_caps(0, &perf_caps);
	if (rc) {
		pr_warn("Could not retrieve perf counters (%d)\n", rc);
		return;
	}

	rc = amd_get_boost_ratio_numerator(0, &numerator);
	if (rc) {
		pr_warn("Could not retrieve highest performance (%d)\n", rc);
		return;
	}
	nominal_perf = perf_caps.nominal_perf;

	if (!nominal_perf) {
		pr_warn("Could not retrieve nominal performance\n");
		return;
	}

	/* midpoint between max_boost and max_P */
	perf_ratio = (div_u64(numerator * SCHED_CAPACITY_SCALE, nominal_perf) + SCHED_CAPACITY_SCALE) >> 1;
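
	/*
	 * Worked example with illustrative values (not from real hardware):
	 * numerator = 196 and nominal_perf = 166 give
	 * div_u64(196 * 1024, 166) ~= 1209, so
	 * perf_ratio = (1209 + 1024) >> 1 = 1116, i.e. roughly halfway
	 * between the boost ratio and the nominal (P0) ratio in
	 * SCHED_CAPACITY_SCALE units.
	 */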
	freq_invariance_set_perf_ratio(perf_ratio, false);
}

static DEFINE_MUTEX(freq_invariance_lock);

void init_freq_invariance_cppc(void)
{
	static bool init_done;

	if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
		return;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	mutex_lock(&freq_invariance_lock);
	if (!init_done)
		amd_set_max_freq_ratio();
	init_done = true;
	mutex_unlock(&freq_invariance_lock);
}

/*
 * Get the highest performance register value.
 * @cpu: CPU from which to get highest performance.
 * @highest_perf: Return address for highest performance value.
 *
 * Return: 0 for success, negative error code otherwise.
 */
int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
{
	u64 val;
	int ret;

	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
		if (ret)
			goto out;

		val = AMD_CPPC_HIGHEST_PERF(val);
	} else {
		ret = cppc_get_highest_perf(cpu, &val);
		if (ret)
			goto out;
	}

	WRITE_ONCE(*highest_perf, (u32)val);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);

/**
 * amd_detect_prefcore: Detect if CPUs in the system support preferred cores
 * @detected: Output variable for the result of the detection.
 *
 * Determine whether CPUs in the system support preferred cores. On systems
 * that support preferred cores, different highest perf values will be found
 * on different cores. On other systems, the highest perf value will be the
 * same on all cores.
 *
 * The result of the detection will be stored in the 'detected' parameter.
 *
 * Return: 0 for success, negative error code otherwise
 */
int amd_detect_prefcore(bool *detected)
{
	int cpu, count = 0;
	u64 highest_perf[2] = {0};

	if (WARN_ON(!detected))
		return -EINVAL;

	switch (amd_pref_core_detected) {
	case AMD_PREF_CORE_SUPPORTED:
		*detected = true;
		return 0;
	case AMD_PREF_CORE_UNSUPPORTED:
		*detected = false;
		return 0;
	default:
		break;
	}

	for_each_present_cpu(cpu) {
		u32 tmp;
		int ret;

		ret = amd_get_highest_perf(cpu, &tmp);
		if (ret)
			return ret;

		if (!count || (count == 1 && tmp != highest_perf[0]))
			highest_perf[count++] = tmp;

		if (count == 2)
			break;
	}

	*detected = (count == 2);
	boost_numerator = highest_perf[0];

	amd_pref_core_detected = *detected ? AMD_PREF_CORE_SUPPORTED :
					     AMD_PREF_CORE_UNSUPPORTED;

	pr_debug("AMD CPPC preferred core is %ssupported (highest perf: 0x%llx)\n",
		 *detected ? "" : "un", highest_perf[0]);

	return 0;
}
EXPORT_SYMBOL_GPL(amd_detect_prefcore);

/**
 * amd_get_boost_ratio_numerator: Get the numerator to use for boost ratio calculation
 * @cpu: CPU to get numerator for.
 * @numerator: Output variable for numerator.
 *
 * Determine the numerator to use for calculating the boost ratio on
 * a CPU. On systems that support preferred cores, this will be a hardcoded
 * value. On other systems this will be the highest performance register value.
 *
 * If the system boots with amd-pstate enabled but preferred cores disabled, the
 * correct boost numerator is still returned to match hardware capabilities,
 * even though the preferred cores scheduling hints are not enabled.
 *
 * Return: 0 for success, negative error code otherwise.
 */
int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
{
	bool prefcore;
	int ret;

	ret = amd_detect_prefcore(&prefcore);
	if (ret)
		return ret;

	/* without preferred cores, return the highest perf register value */
	if (!prefcore) {
		*numerator = boost_numerator;
		return 0;
	}

	/*
	 * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
	 * the highest performance level is set to 196.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=218759
	 */
	if (cpu_feature_enabled(X86_FEATURE_ZEN4)) {
		switch (boot_cpu_data.x86_model) {
		case 0x70 ... 0x7f:
			*numerator = CPPC_HIGHEST_PERF_PERFORMANCE;
			return 0;
		default:
			break;
		}
	}
	*numerator = CPPC_HIGHEST_PERF_PREFCORE;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_get_boost_ratio_numerator);
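
/*
 * Illustrative usage sketch (not part of the kernel sources): a hypothetical
 * consumer such as a cpufreq driver could combine amd_get_boost_ratio_numerator()
 * with the CPPC nominal perf to derive a boost ratio in fixed-point form:
 *
 *	u64 numerator, boost_ratio;
 *	struct cppc_perf_caps caps;
 *
 *	if (!amd_get_boost_ratio_numerator(cpu, &numerator) &&
 *	    !cppc_get_perf_caps(cpu, &caps) && caps.nominal_perf)
 *		boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT,
 *				      caps.nominal_perf);
 *
 * The consumer shown here is an assumption for illustration; see
 * drivers/cpufreq/amd-pstate.c for the in-tree user of this interface.
 */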