/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
				       entry->midr_range_min,
				       entry->midr_range_max);
}

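/*
 * Kryo cores report an implementation-specific part number, so match only
 * on the implementer, the architecture field and the top nibble of the
 * part number, ignoring variant and revision.
 */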
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_model;
}

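/*
 * A CPU has a mismatched cache line size if the strict CTR_EL0 fields it
 * reports differ from the system-wide safe value.
 */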
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

static int cpu_enable_trap_ctr_access(void *__unused)
{
	/* Clear SCTLR_EL1.UCT so that EL0 accesses to CTR_EL0 trap to EL1 */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
	return 0;
}

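/* Index of the last EL2 vector slot handed out; -1 means none allocated yet. */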
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];

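/*
 * Copy the hardening sequence into every 0x80-byte vector entry of the
 * given 2K hyp vector slot, then make it visible to the instruction stream.
 */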
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

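/*
 * Install the hardening callback for this CPU, reusing an existing hyp
 * vector slot if another CPU has already installed the same callback.
 */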
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}

#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL
#define __smccc_workaround_1_hvc_start		NULL
#define __smccc_workaround_1_hvc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

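/*
 * Only install the callback where it is needed: skip CPUs that do not
 * match the erratum entry, and CPUs whose ID_AA64PFR0_EL1.CSV2 field
 * indicates the branch predictor does not need this mitigation.
 */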
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

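/*
 * Falkor link-stack sanitization: overwrite the return-address stack with
 * harmless branch-and-link targets so speculated returns cannot be steered
 * by earlier, attacker-controlled entries.
 */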
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static int enable_smccc_arch_workaround_1(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return 0;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return 0;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return 0;
		cb = call_hvc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_hvc_start;
		smccc_end = __smccc_workaround_1_hvc_end;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return 0;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return 0;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

	return 0;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

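/*
 * Helpers for the errata table below: MIDR_RANGE() matches a model between
 * two variant/revision values, MIDR_ALL_VERSIONS() matches every variant
 * and revision of a model.
 */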
#define MIDR_RANGE(model, min, max) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = 0, \
	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 1)),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.def_scope = SCOPE_LOCAL_CPU,
		.enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.def_scope = SCOPE_LOCAL_CPU,
		.midr_model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
		.enable = enable_smccc_arch_workaround_1,
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "Cortex-A57 EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	},
	{
		.desc = "Cortex-A72 EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	},
#endif
	{
	}
};

/*
 * The CPU errata workarounds are detected and applied at boot time and the
 * related information is freed soon after. If a late CPU requires a
 * workaround that was not detected at boot, fail that CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
	const struct arm64_cpu_capabilities *caps = arm64_errata;

	for (; caps->matches; caps++) {
		if (cpus_have_cap(caps->capability)) {
			if (caps->enable)
				caps->enable((void *)caps);
		} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: Requires work around for %s, not detected"
					" at boot time\n",
				smp_processor_id(),
				caps->desc ? : "an erratum");
			cpu_die_early();
		}
	}
}

void update_cpu_errata_workarounds(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}