// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */
#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>
/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
/*
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
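/*
 * The cpu_show_*() handlers in this file back the sysfs vulnerability
 * reporting interface, e.g. reading
 * /sys/devices/system/cpu/vulnerabilities/spectre_v1 returns the string above.
 */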
/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}
enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

/* Called during entry so must be noinstr */
static noinstr void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Called during entry so must be noinstr */
static noinstr void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
/* Called during entry so must be noinstr */
static noinstr void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();
	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}
/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 * - Vulnerable.
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);
/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}
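/*
 * Undef-instruction fixup for the immediate form of "MSR SSBS, #imm"
 * (instr_val below): mirror the requested value into the saved PSTATE.SSBS
 * bit and skip the faulting instruction.
 */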
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr)
{
	const u32 instr_mask = ~(1U << PSTATE_Imm_shift);
	const u32 instr_val = 0xd500401f | PSTATE_SSBS;

	if ((instr & instr_mask) != instr_val)
		return false;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return true;
}

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);

	/*
	 * SSBS is self-synchronizing and is intended to affect subsequent
	 * speculative instructions, but some CPUs can speculate with a stale
	 * value of SSBS.
	 *
	 * Mitigate this with an unconditional speculation barrier, as CPUs
	 * could mis-speculate branches and bypass a conditional barrier.
	 */
	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
		spec_bar();

	return SPECTRE_MITIGATED;
}
/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
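/*
 * For example, a task opts in from userspace with something like:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * and can query the current state with:
 *
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 */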
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}
/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
	BHB_INSN,
};
static unsigned long system_bhb_mitigations;

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k11_list[] = {
			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
			k = 11;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}
static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
		return;

	write_sysreg(v, vbar_el1);
	isb();
}
static bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str)
{
	__nospectre_bhb = true;
	return 0;
}
early_param("nospectre_bhb", parse_spectre_bhb_param);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off() || __nospectre_bhb) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}
/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}

/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}
#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif