// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/virt.h>
/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different categories.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}
bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();
	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}
static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}
/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}
/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);
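/*
 * Example (illustrative, not part of the original file): per the
 * spectre_v4_params table above, booting with "ssbd=force-on" selects
 * SPECTRE_V4_POLICY_MITIGATION_ENABLED, "ssbd=force-off" selects
 * SPECTRE_V4_POLICY_MITIGATION_DISABLED and "ssbd=kernel" selects
 * SPECTRE_V4_POLICY_MITIGATION_DYNAMIC.
 */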
/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}
/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}
ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}
static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}
static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};
static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		asm volatile(SET_PSTATE_SSBS(1));
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	asm volatile(SET_PSTATE_SSBS(0));
	return SPECTRE_MITIGATED;
}
/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}
static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}
/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * by userspace.
 */
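/*
 * Illustrative sketch (not part of the original file) of how a userspace task
 * would drive this interface; the PR_* constants come from
 * include/uapi/linux/prctl.h:
 *
 *	#include <sys/prctl.h>
 *
 *	// Disable speculative store bypass for the calling task:
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 *	// Query the current per-task state:
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 * These calls reach arch_prctl_spec_ctrl_set()/_get() below via the core
 * prctl() code.
 */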
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}
static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}