/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

/*
 * This should be used immediately before a retpoline alternative. It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
#define ANNOTATE_NOSPEC_ALTERNATIVE \
	ANNOTATE_IGNORE_ALTERNATIVE

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
#define RSB_FILL_LOOPS		16	/* To avoid underflow */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:	call	772f;				\
773:	pause; lfence; jmp 773b;	/* speculation trap */	\
772:	call	774f;				\
775:	pause; lfence; jmp 775b;	/* speculation trap */	\
774:	dec	reg;				\
	jnz	771b;				\
	add	$(BITS_PER_LONG/8) * nr, sp;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@
.Lspec_trap_\@:
	pause; lfence; jmp .Lspec_trap_\@
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)
	ret
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP	\reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),	\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg),	\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*\reg
#endif
.endm

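/*
 * Illustrative usage sketch (not part of the upstream header; the symbol
 * name "handler_ptr" and the register choice are assumptions for the
 * example): from a .S file, an indirect call through %rax would be written
 *
 *	movq	handler_ptr(%rip), %rax
 *	CALL_NOSPEC %rax
 *
 * instead of a plain "call *%rax", so that retpoline-enabled builds pick
 * up the thunked form selected by the alternatives above.
 */
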
 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
		\ftr
.Lskip_rsb_\@:
#endif
.endm

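/*
 * Illustrative usage sketch (an assumption, not taken verbatim from the
 * entry code): a context-switch path that wants to stuff the RSB could do
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * where the scratch register and the feature bit keying the alternative
 * are whatever the call site has available.
 */
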
#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier which is only available in newer GCC;
 * building with CONFIG_RETPOLINE ensures such a compiler is in use.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"    	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif

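/*
 * Illustrative usage sketch (an assumption, not part of the upstream
 * header): a C call site dispatching through a function pointer would
 * typically wrap CALL_NOSPEC/THUNK_TARGET in inline asm, e.g.
 *
 *	unsigned int (*fn)(void);
 *	unsigned int ret;
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret), ASM_CALL_CONSTRAINT
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 *
 * The "=a" output constraint assumes the callee returns its value in
 * %eax/%rax per the usual C ABI; real call sites pick constraints and
 * clobbers to match their calling convention.
 */
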
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS_ENHANCED,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
	unsigned long loops;

	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
		      ALTERNATIVE("jmp 910f",
				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
				  X86_FEATURE_RETPOLINE)
		      "910:"
		      : "=r" (loops), ASM_CALL_CONSTRAINT
		      : : "memory" );
#endif
}

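/*
 * Usage sketch (an assumption, not a definitive call site): a hypervisor
 * exit path would do roughly
 *
 *	... hardware VM exit, back in host context ...
 *	vmexit_fill_RSB();
 *
 * before executing any host 'ret' that could consume a guest-trained RSB
 * entry.
 */
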
static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)

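/*
 * Illustrative usage sketch (the firmware_call() name is made up for the
 * example): callers are expected to bracket firmware entry points like
 *
 *	firmware_restrict_branch_speculation_start();
 *	firmware_call(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * so that IBRS is set, with preemption disabled, only for the duration of
 * the firmware call.
 */
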
DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

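/*
 * Usage sketch (an assumption, not a definitive call site): an idle entry
 * path would clear the buffers right before going to sleep, e.g.
 *
 *	mds_idle_clear_cpu_buffers();
 *	__monitor(...);
 *	__mwait(...);
 *
 * so stale buffer contents cannot be leaked to the sibling hyperthread
 * while this CPU idles.
 */
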
#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause; lfence; jmp spec_trap
 *  do_rop:
 *    mov %rax,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq
 *
 * Without retpolines configured:
 *    jmp *%rax for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
#  define RETPOLINE_RAX_BPF_JIT_SIZE	17
#  define RETPOLINE_RAX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
	EMIT1(0xC3);             /* retq */			\
} while (0)
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
	EMIT1(0xC3);             /* ret */			\
} while (0)
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
#  define RETPOLINE_RAX_BPF_JIT_SIZE	2
#  define RETPOLINE_RAX_BPF_JIT()				\
	EMIT2(0xFF, 0xE0);       /* jmp *%rax */
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
	EMIT2(0xFF, 0xE2)        /* jmp *%edx */
# endif
#endif /* CONFIG_RETPOLINE */

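/*
 * Illustrative usage sketch (an assumption about the JIT call site): when
 * the x86-64 BPF JIT lowers an indirect tail call it is expected to have
 * the target program pointer in %rax and then emit the sequence above with
 *
 *	RETPOLINE_RAX_BPF_JIT();
 *
 * (RETPOLINE_EDX_BPF_JIT() with the target in %edx on 32-bit), so the
 * emitted bytes form either the retpoline trampoline or the plain indirect
 * jump, matching the kernel's retpoline configuration.
 */
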
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */