/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Interface for managing mitigations for Spectre vulnerabilities.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ASM_SPECTRE_H
#define __ASM_SPECTRE_H

#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ	((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)
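
/*
 * Illustrative arithmetic, assuming only the two definitions above:
 * (BP_HARDEN_EL2_SLOTS - 1) * SZ_2K = (4 - 1) * 2KiB = 6KiB.
 */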

#ifndef __ASSEMBLY__

#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/percpu.h>

#include <asm/cpufeature.h>

/* Watch out, ordering is important here. */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

struct pt_regs;
struct task_struct;
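
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream
 * interface): the ordering above lets callers treat a numerically larger
 * state as "worse", so independent results can be folded together by
 * keeping the maximum.
 */
static inline enum mitigation_state
spectre_worst_state(enum mitigation_state a, enum mitigation_state b)
{
	/* SPECTRE_UNAFFECTED < SPECTRE_MITIGATED < SPECTRE_VULNERABLE */
	return a > b ? a : b;
}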

/*
 * Note: the order of this enum corresponds to __bp_harden_hyp_vecs and
 * we rely on having the direct vectors first.
 */
enum arm64_hyp_spectre_vector {
	/*
	 * Take exceptions directly to __kvm_hyp_vector. This must be
	 * 0 so that it is used by default when mitigations are not needed.
	 */
	HYP_VECTOR_DIRECT,

	/*
	 * Bounce via a slot in the hypervisor text mapping of
	 * __bp_harden_hyp_vecs, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_DIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page.
	 */
	HYP_VECTOR_INDIRECT,

	/*
	 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
	 * next to the idmap page, which contains an SMC call.
	 */
	HYP_VECTOR_SPECTRE_INDIRECT,
};
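
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream
 * interface): because the enumerators above also act as vector slot
 * indices, with the direct vectors first, a slot can be turned into an
 * address by stepping in 2KiB increments from the base of a vector mapping.
 */
static inline void *example_hyp_vector_slot(void *base,
					     enum arm64_hyp_spectre_vector slot)
{
	return base + slot * SZ_2K;	/* one 2KiB vector table per slot */
}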

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	enum arm64_hyp_spectre_vector	slot;
	bp_hardening_cb_t		fn;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
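
/*
 * Illustrative sketch (hypothetical helper, for exposition only): a
 * mitigation driver would fill in the per-CPU slot/callback pair on the
 * CPU it is running on, so that arm64_apply_bp_hardening() below finds
 * the right callback.
 */
static inline void example_install_bp_hardening(enum arm64_hyp_spectre_vector slot,
						bp_hardening_cb_t fn)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	data->slot = slot;
	data->fn = fn;
}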

/* Called during entry so must be __always_inline */
static __always_inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!alternative_has_cap_unlikely(ARM64_SPECTRE_V2))
		return;

	d = this_cpu_ptr(&bp_hardening_data);
	if (d->fn)
		d->fn();
}
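
/*
 * Usage sketch (illustrative only): an exception-entry path that needs
 * branch-predictor hardening simply calls arm64_apply_bp_hardening(); when
 * the ARM64_SPECTRE_V2 capability is not present, the function above
 * compiles down to the early return.
 */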

enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

bool has_spectre_v3a(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

enum mitigation_state arm64_get_meltdown_state(void);
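
/*
 * Illustrative sketch (hypothetical reporting helper, not part of the
 * upstream interface): the arm64_get_*_state() accessors above return an
 * enum mitigation_state, which a vulnerabilities report could map to text.
 */
static inline const char *example_spectre_v2_report(void)
{
	switch (arm64_get_spectre_v2_state()) {
	case SPECTRE_UNAFFECTED:
		return "Not affected";
	case SPECTRE_MITIGATED:
		return "Mitigation enabled";
	case SPECTRE_VULNERABLE:
	default:
		return "Vulnerable";
	}
}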

enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);

void spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, __le32 *origptr,
					   __le32 *updptr, int nr_inst);
void smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, __le32 *origptr,
				       __le32 *updptr, int nr_inst);
void spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, __le32 *origptr,
					      __le32 *updptr, int nr_inst);
void spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, __le32 *origptr,
					     __le32 *updptr, int nr_inst);
void spectre_bhb_patch_loop_iter(struct alt_instr *alt,
				 __le32 *origptr, __le32 *updptr, int nr_inst);
void spectre_bhb_patch_wa3(struct alt_instr *alt,
			   __le32 *origptr, __le32 *updptr, int nr_inst);
void spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				__le32 *origptr, __le32 *updptr, int nr_inst);

#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_SPECTRE_H */