// SPDX-License-Identifier: GPL-2.0-only
/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/swab.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/neon.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>
#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)
/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task that was most recently
 * loaded onto the CPU, or NULL if kernel mode NEON has been performed after
 * that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task. If the task is behaving as a VMM, then this will be managed by
 * KVM which will clear it to indicate that the vcpu FPSIMD state is currently
 * loaded on the CPU, allowing the state to be saved if a FPSIMD-aware
 * softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and
 * flag the register state as invalid.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may be
 * called from softirq context, which will save the task's FPSIMD context back
 * to task_struct. To prevent this from racing with the manipulation of the
 * task's FPSIMD state from task context and thereby corrupting the state, it
 * is necessary to protect any manipulation of a task's fpsimd_state or
 * TIF_FOREIGN_FPSTATE flag with get_cpu_fpsimd_context(), which will suspend
 * softirq servicing entirely until put_cpu_fpsimd_context() is called.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_cpu field
 *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
 *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
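
/*
 * Illustrative sketch only (not part of this file's logic): the "still
 * mutually in sync" check described above amounts to the two comparisons
 * below. wants_fpsimd_restore() is a hypothetical name, used here purely
 * for illustration; the real logic lives in fpsimd_thread_switch() and
 * fpsimd_restore_current_state() later in this file.
 *
 *	static bool wants_fpsimd_restore(struct task_struct *tsk)
 *	{
 *		bool wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
 *				  &tsk->thread.uw.fpsimd_state;
 *		bool wrong_cpu = tsk->thread.fpsimd_cpu != smp_processor_id();
 *
 *		return wrong_task || wrong_cpu;	// restore only if out of sync
 *	}
 */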
static DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
__ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
#ifdef CONFIG_ARM64_SVE
	[ARM64_VEC_SVE] = {
		.type			= ARM64_VEC_SVE,
		.name			= "SVE",
		.min_vl			= SVE_VL_MIN,
		.max_vl			= SVE_VL_MIN,
		.max_virtualisable_vl	= SVE_VL_MIN,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[ARM64_VEC_SME] = {
		.type			= ARM64_VEC_SME,
		.name			= "SME",
	},
#endif
};

static unsigned int vec_vl_inherit_flag(enum vec_type type)
{
	switch (type) {
	case ARM64_VEC_SVE:
		return TIF_SVE_VL_INHERIT;
	case ARM64_VEC_SME:
		return TIF_SME_VL_INHERIT;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
struct vl_config {
	int __default_vl;		/* Default VL for tasks */
};

static struct vl_config vl_config[ARM64_VEC_MAX];

static inline int get_default_vl(enum vec_type type)
{
	return READ_ONCE(vl_config[type].__default_vl);
}
#ifdef CONFIG_ARM64_SVE

static inline int get_sve_default_vl(void)
{
	return get_default_vl(ARM64_VEC_SVE);
}

static inline void set_default_vl(enum vec_type type, int val)
{
	WRITE_ONCE(vl_config[type].__default_vl, val);
}

static inline void set_sve_default_vl(int val)
{
	set_default_vl(ARM64_VEC_SVE, val);
}

static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declaration for code that will be optimised out: */
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_SME

static int get_sme_default_vl(void)
{
	return get_default_vl(ARM64_VEC_SME);
}

static void set_sme_default_vl(int val)
{
	set_default_vl(ARM64_VEC_SME, val);
}

static void sme_free(struct task_struct *);

#else

static inline void sme_free(struct task_struct *t) { }

#endif
static void fpsimd_bind_task_to_cpu(void);
/*
 * Claim ownership of the CPU FPSIMD context for use by the calling context.
 *
 * The caller may freely manipulate the FPSIMD context metadata until
 * put_cpu_fpsimd_context() is called.
 *
 * On RT kernels local_bh_disable() is not sufficient because it only
 * serializes soft interrupt related sections via a local lock, but stays
 * preemptible. Disabling preemption is the right choice here as bottom
 * half processing is always in thread context on RT kernels so it
 * implicitly prevents bottom half processing as well.
 */
static void get_cpu_fpsimd_context(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_disable();
	else
		preempt_disable();
}

/*
 * Release the CPU FPSIMD context.
 *
 * Must be called from a context in which get_cpu_fpsimd_context() was
 * previously called, with no call to put_cpu_fpsimd_context() in the
 * meantime.
 */
static void put_cpu_fpsimd_context(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_enable();
	else
		preempt_enable();
}
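
/*
 * Typical usage of the two helpers above (illustrative sketch): any code
 * that manipulates current's FPSIMD state or TIF_FOREIGN_FPSTATE brackets
 * the manipulation so that softirq-driven saves cannot run mid-update:
 *
 *	get_cpu_fpsimd_context();
 *	fpsimd_save_user_state();	// e.g. write the live regs to memory
 *	put_cpu_fpsimd_context();
 *
 * fpsimd_preserve_current_state() below is a real instance of exactly
 * this pattern.
 */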
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type)
{
	return task->thread.vl[type];
}

void task_set_vl(struct task_struct *task, enum vec_type type,
		 unsigned long vl)
{
	task->thread.vl[type] = vl;
}

unsigned int task_get_vl_onexec(const struct task_struct *task,
				enum vec_type type)
{
	return task->thread.vl_onexec[type];
}

void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
			unsigned long vl)
{
	task->thread.vl_onexec[type] = vl;
}
/*
 * TIF_SME controls whether a task can use SME without trapping while
 * in userspace; when TIF_SME is set we must have storage
 * allocated in sve_state and sme_state to store the contents of both ZA
 * and the SVE registers for both streaming and non-streaming modes.
 *
 * If both SVCR.ZA and SVCR.SM are disabled then at any point we
 * may disable TIF_SME and reenable traps.
 */

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also (together with TIF_SME) the way a task's
 * FPSIMD/SVE state is stored in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked. If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE or SVCR.SM set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 * The data will be stored in one of two formats:
 *
 *  * FPSIMD only - FP_STATE_FPSIMD:
 *
 *    When only the FPSIMD state is stored, task->thread.fp_type is set to
 *    FP_STATE_FPSIMD, the FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view. For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced and any data stored
 *    there should be considered stale and not referenced.
 *
 *  * SVE state - FP_STATE_SVE:
 *
 *    When the full SVE state is stored, task->thread.fp_type is set to
 *    FP_STATE_SVE and Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl or, if SVCR.SM is set,
 *    task->thread.sme_vl. The storage for the vector registers in
 *    task->thread.uw.fpsimd_state should be ignored.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size. The data stored in
 *    task->thread.uw.fpsimd_state.vregs should be considered stale
 *    and not referenced.
 *
 * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *   irrespective of whether TIF_SVE is clear or set, since these are
 *   not vector length dependent.
 */
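
/*
 * Illustrative sketch of what the two formats above mean for a reader of
 * the saved state (read_vreg() is a hypothetical helper, not kernel API;
 * ZREG() is defined later in this file):
 *
 *	static void read_vreg(struct task_struct *t, unsigned int n,
 *			      __uint128_t *out)
 *	{
 *		unsigned int vq = sve_vq_from_vl(thread_get_cur_vl(&t->thread));
 *
 *		if (t->thread.fp_type == FP_STATE_SVE)
 *			// Vn is bits [127:0] of Zn, held in sve_state
 *			memcpy(out, ZREG(t->thread.sve_state, vq, n), 16);
 *		else
 *			// only uw.fpsimd_state is valid; the upper Zn
 *			// bits are logically zero
 *			*out = t->thread.uw.fpsimd_state.vregs[n];
 *	}
 */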
/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 */
static void task_fpsimd_load(void)
{
	bool restore_sve_regs = false;
	bool restore_ffr;

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(preemptible());
	WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE));

	if (system_supports_fpmr())
		write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR);

	if (system_supports_sve() || system_supports_sme()) {
		switch (current->thread.fp_type) {
		case FP_STATE_FPSIMD:
			/* Stop tracking SVE for this task until next use. */
			if (test_and_clear_thread_flag(TIF_SVE))
				sve_user_disable();
			break;
		case FP_STATE_SVE:
			if (!thread_sm_enabled(&current->thread) &&
			    !WARN_ON_ONCE(!test_and_set_thread_flag(TIF_SVE)))
				sve_user_enable();

			if (test_thread_flag(TIF_SVE))
				sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);

			restore_sve_regs = true;
			restore_ffr = true;
			break;
		default:
			/*
			 * This indicates either a bug in
			 * fpsimd_save_user_state() or memory corruption, we
			 * should always record an explicit format
			 * when we save. We always at least have the
			 * memory allocated for FPSIMD registers so
			 * try that and hope for the best.
			 */
			WARN_ON_ONCE(1);
			clear_thread_flag(TIF_SVE);
			break;
		}
	}

	/* Restore SME, override SVE register configuration if needed */
	if (system_supports_sme()) {
		unsigned long sme_vl = task_get_sme_vl(current);

		/* Ensure VL is set up for restoring data */
		if (test_thread_flag(TIF_SME))
			sme_set_vq(sve_vq_from_vl(sme_vl) - 1);

		write_sysreg_s(current->thread.svcr, SYS_SVCR);

		if (thread_za_enabled(&current->thread))
			sme_load_state(current->thread.sme_state,
				       system_supports_sme2());

		if (thread_sm_enabled(&current->thread))
			restore_ffr = system_supports_fa64();
	}

	if (restore_sve_regs) {
		WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE);
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr,
			       restore_ffr);
	} else {
		WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD);
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
	}
}
/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers. Note carefully that the
 * current context is the context last bound to the CPU stored in
 * last, if KVM is involved this may be the guest VM context rather
 * than the host thread for the VM pointed to by current. This means
 * that we must always reference the state storage via last rather
 * than via current, if we are saving KVM state then it will have
 * ensured that the type of registers to save is set in last->to_save.
 */
static void fpsimd_save_user_state(void)
{
	struct cpu_fp_state const *last =
		this_cpu_ptr(&fpsimd_last_state);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
	bool save_sve_regs = false;
	bool save_ffr;
	unsigned int vl;

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(preemptible());

	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		return;

	if (system_supports_fpmr())
		*(last->fpmr) = read_sysreg_s(SYS_FPMR);

	/*
	 * If a task is in a syscall the ABI allows us to only
	 * preserve the state shared with FPSIMD so don't bother
	 * saving the full SVE state in that case.
	 */
	if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE) &&
	     !in_syscall(current_pt_regs())) ||
	    last->to_save == FP_STATE_SVE) {
		save_sve_regs = true;
		save_ffr = true;
		vl = last->sve_vl;
	}

	if (system_supports_sme()) {
		u64 *svcr = last->svcr;

		*svcr = read_sysreg_s(SYS_SVCR);

		if (*svcr & SVCR_ZA_MASK)
			sme_save_state(last->sme_state,
				       system_supports_sme2());

		/* If we are in streaming mode override regular SVE. */
		if (*svcr & SVCR_SM_MASK) {
			save_sve_regs = true;
			save_ffr = system_supports_fa64();
			vl = last->sme_vl;
		}
	}

	if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) {
		/* Get the configured VL from RDVL, will account for SM */
		if (WARN_ON(sve_get_vl() != vl)) {
			/*
			 * Can't save the user regs, so current would
			 * re-enter user with corrupt state.
			 * There's no way to recover, so kill it:
			 */
			force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
			return;
		}

		sve_save_state((char *)last->sve_state +
					sve_ffr_offset(vl),
			       &last->st->fpsr, save_ffr);
		*last->fp_type = FP_STATE_SVE;
	} else {
		fpsimd_save_state(last->st);
		*last->fp_type = FP_STATE_FPSIMD;
	}
}
/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(enum vec_type type,
						 unsigned int vl)
{
	struct vl_info *info = &vl_info[type];
	int bit;
	int max_vl = info->max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = info->min_vl;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = info->min_vl;

	if (vl > max_vl)
		vl = max_vl;
	if (vl < info->min_vl)
		vl = info->min_vl;

	bit = find_next_bit(info->vq_map, SVE_VQ_MAX,
			    __vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(__bit_to_vq(bit));
}
#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)

static int vec_proc_do_default_vl(const struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	struct vl_info *info = table->extra1;
	enum vec_type type = info->type;
	int ret;
	int vl = get_default_vl(type);
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = info->max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	set_default_vl(type, find_supported_vector_length(type, vl));
	return 0;
}

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= vec_proc_do_default_vl,
		.extra1		= &vl_info[ARM64_VEC_SVE],
	},
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
#if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL)
static struct ctl_table sme_default_vl_table[] = {
	{
		.procname	= "sme_default_vector_length",
		.mode		= 0644,
		.proc_handler	= vec_proc_do_default_vl,
		.extra1		= &vl_info[ARM64_VEC_SME],
	},
};

static int __init sme_sysctl_init(void)
{
	if (system_supports_sme())
		if (!register_sysctl("abi", sme_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */
static int __init sme_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */
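
/*
 * Userspace view (illustrative sketch): the handlers above surface as
 * /proc/sys/abi/sve_default_vector_length and
 * /proc/sys/abi/sme_default_vector_length. Writing -1 selects the
 * maximum supported VL, e.g.:
 *
 *	int fd = open("/proc/sys/abi/sve_default_vector_length", O_WRONLY);
 *	write(fd, "-1", 2);	// -1 means "set default VL to the maximum"
 *	close(fd);
 */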
#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
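
/*
 * Example (illustrative): the Zn slots are packed contiguously at
 * vq * SVE_VQ_BYTES (16) bytes each, so for vq == 2 (a 32-byte VL)
 * Z3 starts 3 * 32 == 96 bytes into sve_state:
 *
 *	ZREG(sve_state, 2, 3) == (char *)sve_state + 96
 */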
#ifdef CONFIG_CPU_BIG_ENDIAN
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif

#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
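
/*
 * Example (illustrative): on a big-endian kernel each 64-bit half of the
 * 128-bit value is byte-swapped independently, so the in-memory SVE view
 * stays little-endian:
 *
 *	__uint128_t v = (__uint128_t)0x0123456789abcdefULL << 64;
 *	__uint128_t le = arm64_cpu_to_le128(v);
 *	// le == 0xefcdab8967452301: the high half of v, byte-swapped,
 *	// now sits in the low half (and vice versa)
 *
 * On little-endian kernels both helpers are identity functions.
 */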
static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
			    unsigned int vq)
{
	unsigned int i;
	__uint128_t *p;

	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t *)ZREG(sst, vq, i);
		*p = arm64_cpu_to_le128(fst->vregs[i]);
	}
}
/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!system_supports_sve() && !system_supports_sme())
		return;

	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
	__fpsimd_to_sve(sst, fst, vq);
}
/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq, vl;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t const *p;

	if (!system_supports_sve() && !system_supports_sme())
		return;

	vl = thread_get_cur_vl(&task->thread);
	vq = sve_vq_from_vl(vl);
	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t const *)ZREG(sst, vq, i);
		fst->vregs[i] = arm64_le128_to_cpu(*p);
	}
}
void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg_s(read_sysreg_s(SYS_SCTLR_EL1) | SCTLR_EL1_EnFPM_MASK,
		       SYS_SCTLR_EL1);
}
#ifdef CONFIG_ARM64_SVE
/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}
/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	unsigned int vl = 0;

	if (system_supports_sve())
		vl = task_get_sve_vl(task);
	if (system_supports_sme())
		vl = max(vl, task_get_sme_vl(task));

	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl));
}
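
/*
 * Example (illustrative): with an SVE VL of 16 bytes and an SME VL of 64
 * bytes configured, the buffer is sized for vl == 64, since the same
 * sve_state buffer must be able to back both streaming and non-streaming
 * mode register state.
 */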
/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data. The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
 */
void sve_alloc(struct task_struct *task, bool flush)
{
	if (task->thread.sve_state) {
		if (flush)
			memset(task->thread.sve_state, 0,
			       sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);
}
/*
 * Force the FPSIMD state shared with SVE to be updated in the SVE state
 * even if the SVE state is the current active state.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_force_sync_to_sve(struct task_struct *task)
{
	fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE) &&
	    !thread_sm_enabled(&task->thread))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (task->thread.fp_type == FP_STATE_SVE)
		sve_to_fpsimd(task);
}
/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!test_tsk_thread_flag(task, TIF_SVE) &&
	    !thread_sm_enabled(&task->thread))
		return;

	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
	__fpsimd_to_sve(sst, fst, vq);
}
int vec_set_vector_length(struct task_struct *task, enum vec_type type,
			  unsigned long vl, unsigned long flags)
{
	bool free_sme = false;

	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic code
	 * can work with. A flag may be assigned in the future to
	 * allow setting of larger vector lengths without confusing
	 * older software.
	 */
	if (vl > VL_ARCH_MAX)
		vl = VL_ARCH_MAX;

	vl = find_supported_vector_length(type, vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task_set_vl_onexec(task, type, vl);
	else
		/* Reset VL to system default on next exec: */
		task_set_vl_onexec(task, type, 0);

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task_get_vl(task, type))
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * regular FPSIMD thread.
	 */
	if (task == current) {
		get_cpu_fpsimd_context();

		fpsimd_save_user_state();
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE) ||
	    thread_sm_enabled(&task->thread)) {
		sve_to_fpsimd(task);
		task->thread.fp_type = FP_STATE_FPSIMD;
	}

	if (system_supports_sme()) {
		if (type == ARM64_VEC_SME ||
		    !(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) {
			/*
			 * We are changing the SME VL or weren't using
			 * SME anyway, discard the state and force a
			 * reallocation.
			 */
			task->thread.svcr &= ~(SVCR_SM_MASK |
					       SVCR_ZA_MASK);
			clear_tsk_thread_flag(task, TIF_SME);
			free_sme = true;
		}
	}

	if (task == current)
		put_cpu_fpsimd_context();

	task_set_vl(task, type, vl);

	/*
	 * Free the changed states if they are not in use, SME will be
	 * reallocated to the correct size on next use and we just
	 * allocate SVE now in case it is needed for use in streaming
	 * mode.
	 */
	sve_free(task);
	sve_alloc(task, true);

	if (free_sme)
		sme_free(task);

out:
	update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}
/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields.
 * SVE and SME use the same bits for _ONEXEC and _INHERIT.
 *
 * flags are as for vec_set_vector_length().
 */
static int vec_prctl_status(enum vec_type type, unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = task_get_vl_onexec(current, type);
	else
		ret = task_get_vl(current, type);

	if (test_thread_flag(vec_vl_inherit_flag(type)))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}

/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;

	ret = vec_set_vector_length(current, ARM64_VEC_SVE, vl, flags);
	if (ret)
		return ret;

	return vec_prctl_status(ARM64_VEC_SVE, flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;

	return vec_prctl_status(ARM64_VEC_SVE, 0);
}
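
/*
 * Userspace view (illustrative sketch): the prctl()s wired up above are
 * used like this from a C program:
 *
 *	#include <sys/prctl.h>
 *
 *	// request VL = 32 bytes, inherited across fork/exec
 *	int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *	if (ret < 0)
 *		return -1;
 *	int vl = prctl(PR_SVE_GET_VL) & PR_SVE_VL_LEN_MASK;
 *
 * The kernel clamps the request via find_supported_vector_length(), so
 * the VL read back is the one actually granted, which may differ from
 * the VL requested.
 */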
#ifdef CONFIG_ARM64_SME

/* PR_SME_SET_VL */
int sme_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SME_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sme() || is_compat_task())
		return -EINVAL;

	ret = vec_set_vector_length(current, ARM64_VEC_SME, vl, flags);
	if (ret)
		return ret;

	return vec_prctl_status(ARM64_VEC_SME, flags);
}

/* PR_SME_GET_VL */
int sme_get_current_vl(void)
{
	if (!system_supports_sme() || is_compat_task())
		return -EINVAL;

	return vec_prctl_status(ARM64_VEC_SME, 0);
}
#endif /* CONFIG_ARM64_SME */
static void vec_probe_vqs(struct vl_info *info,
			  DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;

	bitmap_zero(map, SVE_VQ_MAX);

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_vl(info->type, vq - 1); /* self-syncing */

		switch (info->type) {
		case ARM64_VEC_SVE:
			vl = sve_get_vl();
			break;
		case ARM64_VEC_SME:
			vl = sme_get_vl();
			break;
		default:
			vl = 0;
			break;
		}

		/* Minimum VL identified? */
		if (sve_vq_from_vl(vl) > vq)
			break;

		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(__vq_to_bit(vq), map);
	}
}
/*
 * Initialise the set of known supported VQs for the boot CPU.
 * This is called during kernel boot, before secondary CPUs are brought up.
 */
void __init vec_init_vq_map(enum vec_type type)
{
	struct vl_info *info = &vl_info[type];
	vec_probe_vqs(info, info->vq_map);
	bitmap_copy(info->vq_partial_map, info->vq_map, SVE_VQ_MAX);
}
/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 * This function is called during the bring-up of early secondary CPUs only.
 */
void vec_update_vq_map(enum vec_type type)
{
	struct vl_info *info = &vl_info[type];
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);

	vec_probe_vqs(info, tmp_map);
	bitmap_and(info->vq_map, info->vq_map, tmp_map, SVE_VQ_MAX);
	bitmap_or(info->vq_partial_map, info->vq_partial_map, tmp_map,
		  SVE_VQ_MAX);
}
/*
 * Check whether the current CPU supports all VQs in the committed set.
 * This function is called during the bring-up of late secondary CPUs only.
 */
int vec_verify_vq_map(enum vec_type type)
{
	struct vl_info *info = &vl_info[type];
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	vec_probe_vqs(info, tmp_map);

	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	if (bitmap_intersects(tmp_map, info->vq_map, SVE_VQ_MAX)) {
		pr_warn("%s: cpu%d: Required vector length(s) missing\n",
			info->name, smp_processor_id());
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return 0;

	/*
	 * For KVM, it is necessary to ensure that this CPU doesn't
	 * support any vector length that guests may have probed as
	 * supported.
	 */

	/* Recover the set of supported VQs: */
	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	/* Find VQs supported that are not globally supported: */
	bitmap_andnot(tmp_map, tmp_map, info->vq_map, SVE_VQ_MAX);

	/* Find the lowest such VQ, if any: */
	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		return 0; /* no mismatches */

	/*
	 * Mismatches above sve_max_virtualisable_vl are fine, since
	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
	 */
	if (sve_vl_from_vq(__bit_to_vq(b)) <= info->max_virtualisable_vl) {
		pr_warn("%s: cpu%d: Unsupported vector length(s) present\n",
			info->name, smp_processor_id());
		return -EINVAL;
	}

	return 0;
}
static void __init sve_efi_setup(void)
{
	int max_vl = 0;
	int i;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	for (i = 0; i < ARRAY_SIZE(vl_info); i++)
		max_vl = max(vl_info[i].max_vl, max_vl);

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}
void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();

	write_sysreg_s(0, SYS_ZCR_EL1);
}
void __init sve_setup(void)
{
	struct vl_info *info = &vl_info[ARM64_VEC_SVE];
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;
	int max_bit;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map)))
		set_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map);

	max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
	info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	set_sve_default_vl(find_supported_vector_length(ARM64_VEC_SVE, 64));

	bitmap_andnot(tmp_map, info->vq_partial_map, info->vq_map,
		      SVE_VQ_MAX);

	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		/* No non-virtualisable VLs found */
		info->max_virtualisable_vl = SVE_VQ_MAX;
	else if (WARN_ON(b == SVE_VQ_MAX - 1))
		/* No virtualisable VLs? This is architecturally forbidden. */
		info->max_virtualisable_vl = SVE_VQ_MIN;
	else /* b + 1 < SVE_VQ_MAX */
		info->max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));

	if (info->max_virtualisable_vl > info->max_vl)
		info->max_virtualisable_vl = info->max_vl;

	pr_info("%s: maximum available vector length %u bytes per vector\n",
		info->name, info->max_vl);
	pr_info("%s: default vector length %u bytes per vector\n",
		info->name, get_sve_default_vl());

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl() < sve_max_vl())
		pr_warn("%s: unvirtualisable vector lengths present\n",
			info->name);

	sve_efi_setup();
}
/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
	sme_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_SME

/*
 * Ensure that task->thread.sme_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sme_state with new data. The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability, the architecture
 * guarantees that when ZA is enabled it will be zeroed.
 */
void sme_alloc(struct task_struct *task, bool flush)
{
	if (task->thread.sme_state) {
		if (flush)
			memset(task->thread.sme_state, 0,
			       sme_state_size(task));
		return;
	}

	/* This could potentially be up to 64K. */
	task->thread.sme_state =
		kzalloc(sme_state_size(task), GFP_KERNEL);
}

static void sme_free(struct task_struct *task)
{
	kfree(task->thread.sme_state);
	task->thread.sme_state = NULL;
}
void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* Set priority for all PEs to architecturally defined minimum */
	write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
		       SYS_SMPRI_EL1);

	/* Allow SME in kernel */
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1);
	isb();

	/* Ensure all bits in SMCR are set to known values */
	write_sysreg_s(0, SYS_SMCR_EL1);

	/* Allow EL0 to access TPIDR2 */
	write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1);
	isb();
}

void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* This must be enabled after SME */
	BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME);

	/* Allow use of ZT0 */
	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
		       SYS_SMCR_EL1);
}

void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* This must be enabled after SME */
	BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME);

	/* Allow use of FA64 */
	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
		       SYS_SMCR_EL1);
}
void __init sme_setup(void)
{
	struct vl_info *info = &vl_info[ARM64_VEC_SME];
	int min_bit, max_bit;

	if (!system_supports_sme())
		return;

	/*
	 * SME doesn't require any particular vector length be
	 * supported but it does require at least one. We should have
	 * disabled the feature entirely while bringing up CPUs but
	 * let's double check here. The bitmap is SVE_VQ_MAP sized for
	 * sharing with SVE.
	 */
	WARN_ON(bitmap_empty(info->vq_map, SVE_VQ_MAX));

	min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX);
	info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit));

	max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
	info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));

	WARN_ON(info->min_vl > info->max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 32
	 * (256 bits) if there is one since this is guaranteed not to
	 * grow the signal frame when in streaming mode, otherwise the
	 * minimum available VL will be used.
	 */
	set_sme_default_vl(find_supported_vector_length(ARM64_VEC_SME, 32));

	pr_info("SME: minimum available vector length %u bytes per vector\n",
		info->min_vl);
	pr_info("SME: maximum available vector length %u bytes per vector\n",
		info->max_vl);
	pr_info("SME: default vector length %u bytes per vector\n",
		get_sme_default_vl());
}
void sme_suspend_exit(void)
{
	u64 smcr = 0;

	if (!system_supports_sme())
		return;

	if (system_supports_fa64())
		smcr |= SMCR_ELx_FA64;
	if (system_supports_sme2())
		smcr |= SMCR_ELx_EZT0;

	write_sysreg_s(smcr, SYS_SMCR_EL1);
	write_sysreg_s(0, SYS_SMPRI_EL1);
}

#endif /* CONFIG_ARM64_SME */
static void sve_init_regs(void)
{
	/*
	 * Convert the FPSIMD state to SVE, zeroing all the state that
	 * is not shared with FPSIMD. If (as is likely) the current
	 * state is live in the registers then do this there and
	 * update our metadata for the current task including
	 * disabling the trap, otherwise update our in-memory copy.
	 * We are guaranteed to not be in streaming mode, we can only
	 * take a SVE trap when not in streaming mode and we can't be
	 * in streaming mode when taking a SME trap.
	 */
	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		unsigned long vq_minus_one =
			sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_set_vq(vq_minus_one);
		sve_flush_live(true, vq_minus_one);
		fpsimd_bind_task_to_cpu();
	} else {
		fpsimd_to_sve(current);
		current->thread.fp_type = FP_STATE_SVE;
		fpsimd_flush_task_state(current);
	}
}
/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and the access trap is
 * disabled.
 *
 * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
void do_sve_acc(unsigned long esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		force_sig(SIGKILL);
		return;
	}

	get_cpu_fpsimd_context();

	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	/*
	 * Even if the task can have used streaming mode we can only
	 * generate SVE access traps in normal SVE mode and
	 * transitioning out of streaming mode may discard any
	 * streaming mode state. Always clear the high bits to avoid
	 * any potential errors tracking what is properly initialised.
	 */
	sve_init_regs();

	put_cpu_fpsimd_context();
}
/*
 * Trapped SME access
 *
 * Storage is allocated for the full SVE and SME state, the current
 * FPSIMD register contents are migrated to SVE if SVE is not already
 * active, and the access trap is disabled.
 *
 * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state()
 * would have disabled the SME access trap for userspace during
 * ret_to_user, making an SME access trap impossible in that case.
 */
void do_sme_acc(unsigned long esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SME, the hardware could still trap: */
	if (unlikely(!system_supports_sme()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	/*
	 * If this is not a trap due to SME being disabled then something
	 * is being used in the wrong mode, report as SIGILL.
	 */
	if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	sve_alloc(current, false);
	sme_alloc(current, true);
	if (!current->thread.sve_state || !current->thread.sme_state) {
		force_sig(SIGKILL);
		return;
	}

	get_cpu_fpsimd_context();

	/* With TIF_SME userspace shouldn't generate any traps */
	if (test_and_set_thread_flag(TIF_SME))
		WARN_ON(1);

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		unsigned long vq_minus_one =
			sve_vq_from_vl(task_get_sme_vl(current)) - 1;
		sme_set_vq(vq_minus_one);

		fpsimd_bind_task_to_cpu();
	}

	put_cpu_fpsimd_context();
}
/*
 * Trapped FP/ASIMD access.
 */
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs)
{
	/* Even if we chose not to use FPSIMD, the hardware could still trap: */
	if (!system_supports_fpsimd()) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	/*
	 * When FPSIMD is enabled, we should never take a trap unless something
	 * has gone very wrong.
	 */
	BUG();
}
/*
 * Raise a SIGFPE for the current process.
 */
void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs)
{
	unsigned int si_code = FPE_FLTUNK;

	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	send_sig_fault(SIGFPE, si_code,
		       (void __user *)instruction_pointer(regs),
		       current);
}
static void fpsimd_load_kernel_state(struct task_struct *task)
{
	struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);

	/*
	 * Elide the load if this CPU holds the most recent kernel mode
	 * FPSIMD context of the current task.
	 */
	if (last->st == &task->thread.kernel_fpsimd_state &&
	    task->thread.kernel_fpsimd_cpu == smp_processor_id())
		return;

	fpsimd_load_state(&task->thread.kernel_fpsimd_state);
}

static void fpsimd_save_kernel_state(struct task_struct *task)
{
	struct cpu_fp_state cpu_fp_state = {
		.st		= &task->thread.kernel_fpsimd_state,
		.to_save	= FP_STATE_FPSIMD,
	};

	fpsimd_save_state(&task->thread.kernel_fpsimd_state);
	fpsimd_bind_state_to_cpu(&cpu_fp_state);

	task->thread.kernel_fpsimd_cpu = smp_processor_id();
}
/*
 * Invalidate any task's FPSIMD state that is present on this cpu.
 * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
 * before calling this function.
 */
static void fpsimd_flush_cpu_state(void)
{
	WARN_ON(!system_supports_fpsimd());
	__this_cpu_write(fpsimd_last_state.st, NULL);

	/*
	 * Leaving streaming mode enabled will cause issues for any kernel
	 * NEON and leaving streaming mode or ZA enabled may increase power
	 * consumption.
	 */
	if (system_supports_sme())
		sme_smstop();

	set_thread_flag(TIF_FOREIGN_FPSTATE);
}
void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	WARN_ON_ONCE(!irqs_disabled());

	/* Save unsaved fpsimd state, if any: */
	if (test_thread_flag(TIF_KERNEL_FPSTATE))
		fpsimd_save_kernel_state(current);
	else
		fpsimd_save_user_state();

	if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) {
		fpsimd_load_kernel_state(next);
		fpsimd_flush_cpu_state();
	} else {
		/*
		 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
		 * state. For kernel threads, FPSIMD registers are never
		 * loaded with user mode FPSIMD state and so wrong_task and
		 * wrong_cpu will always be true.
		 */
		wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
			&next->thread.uw.fpsimd_state;
		wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

		update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
				       wrong_task || wrong_cpu);
	}
}
static void fpsimd_flush_thread_vl(enum vec_type type)
{
	int vl, supported_vl;

	/*
	 * Reset the task vector length as required. This is where we
	 * ensure that all user tasks have a valid vector length
	 * configured: no kernel task can become a user task without
	 * an exec and hence a call to this function. By the time the
	 * first call to this function is made, all early hardware
	 * probing is complete, so __default_vl should be valid.
	 * If a bug causes this to go wrong, we make some noise and
	 * try to fudge thread.sve_vl to a safe value here.
	 */
	vl = task_get_vl_onexec(current, type);
	if (!vl)
		vl = get_default_vl(type);

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = vl_info[type].min_vl;

	supported_vl = find_supported_vector_length(type, vl);
	if (WARN_ON(supported_vl != vl))
		vl = supported_vl;

	task_set_vl(current, type, vl);

	/*
	 * If the task is not set to inherit, ensure that the vector
	 * length will be reset by a subsequent exec:
	 */
	if (!test_thread_flag(vec_vl_inherit_flag(type)))
		task_set_vl_onexec(current, type, 0);
}
void fpsimd_flush_thread(void)
{
	void *sve_state = NULL;
	void *sme_state = NULL;

	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();

	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);

		/* Defer kfree() while in atomic context */
		sve_state = current->thread.sve_state;
		current->thread.sve_state = NULL;

		fpsimd_flush_thread_vl(ARM64_VEC_SVE);
	}

	if (system_supports_sme()) {
		clear_thread_flag(TIF_SME);

		/* Defer kfree() while in atomic context */
		sme_state = current->thread.sme_state;
		current->thread.sme_state = NULL;

		fpsimd_flush_thread_vl(ARM64_VEC_SME);
		current->thread.svcr = 0;
	}

	current->thread.fp_type = FP_STATE_FPSIMD;

	put_cpu_fpsimd_context();
	kfree(sve_state);
	kfree(sme_state);
}
/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'.
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();
	fpsimd_save_user_state();
	put_cpu_fpsimd_context();
}
/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (current->thread.fp_type == FP_STATE_SVE)
		sve_to_fpsimd(current);
}
/*
 * Called by KVM when entering the guest.
 */
void fpsimd_kvm_prepare(void)
{
	if (!system_supports_sve())
		return;

	/*
	 * KVM does not save host SVE state since we can only enter
	 * the guest from a syscall so the ABI means that only the
	 * non-saved SVE state needs to be saved. If we have left
	 * SVE enabled for performance reasons then update the task
	 * state to be FPSIMD only.
	 */
	get_cpu_fpsimd_context();

	if (test_and_clear_thread_flag(TIF_SVE)) {
		sve_to_fpsimd(current);
		current->thread.fp_type = FP_STATE_FPSIMD;
	}

	put_cpu_fpsimd_context();
}
/*
 * Associate current's FPSIMD context with this cpu
 * The caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 */
static void fpsimd_bind_task_to_cpu(void)
{
	struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	last->st = &current->thread.uw.fpsimd_state;
	last->sve_state = current->thread.sve_state;
	last->sme_state = current->thread.sme_state;
	last->sve_vl = task_get_sve_vl(current);
	last->sme_vl = task_get_sme_vl(current);
	last->svcr = &current->thread.svcr;
	last->fpmr = &current->thread.uw.fpmr;
	last->fp_type = &current->thread.fp_type;
	last->to_save = FP_STATE_CURRENT;
	current->thread.fpsimd_cpu = smp_processor_id();

	/*
	 * Toggle SVE and SME trapping for userspace if needed, these
	 * are serialised by ret_to_user().
	 */
	if (system_supports_sme()) {
		if (test_thread_flag(TIF_SME))
			sme_user_enable();
		else
			sme_user_disable();
	}

	if (system_supports_sve()) {
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();
	}
}
void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state)
{
	struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!in_softirq() && !irqs_disabled());

	*last = *state;
}
/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'. This is called when we are preparing to return to
 * userspace to ensure that userspace sees a good register state.
 */
void fpsimd_restore_current_state(void)
{
	/*
	 * TIF_FOREIGN_FPSTATE is set on the init task and copied by
	 * arch_dup_task_struct() regardless of whether FP/SIMD is detected.
	 * Thus user threads can have this set even when FP/SIMD hasn't been
	 * detected.
	 *
	 * When FP/SIMD is detected, begin_new_exec() will set
	 * TIF_FOREIGN_FPSTATE via flush_thread() -> fpsimd_flush_thread(),
	 * and fpsimd_thread_switch() will set TIF_FOREIGN_FPSTATE when
	 * switching tasks. We detect FP/SIMD before we exec the first user
	 * process, ensuring this has TIF_FOREIGN_FPSTATE set and
	 * do_notify_resume() will call fpsimd_restore_current_state() to
	 * install the user FP/SIMD context.
	 *
	 * When FP/SIMD is not detected, nothing else will clear or set
	 * TIF_FOREIGN_FPSTATE prior to the first return to userspace, and
	 * we must clear TIF_FOREIGN_FPSTATE to avoid do_notify_resume()
	 * looping forever calling fpsimd_restore_current_state().
	 */
	if (!system_supports_fpsimd()) {
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		return;
	}

	get_cpu_fpsimd_context();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}

	put_cpu_fpsimd_context();
}
/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'. This is used by the signal code to restore the
 * register state when returning from a signal handler in FPSIMD only cases,
 * any SVE context will be discarded.
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	get_cpu_fpsimd_context();

	current->thread.uw.fpsimd_state = *state;
	if (test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();
	fpsimd_bind_task_to_cpu();

	clear_thread_flag(TIF_FOREIGN_FPSTATE);

	put_cpu_fpsimd_context();
}
/*
 * Invalidate live CPU copies of task t's FPSIMD state
 *
 * This function may be called with preemption enabled. The barrier()
 * ensures that the assignment to fpsimd_cpu is visible to any
 * preemption/softirq that could race with set_tsk_thread_flag(), so
 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
 *
 * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
 * subsequent code.
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	t->thread.fpsimd_cpu = NR_CPUS;
	/*
	 * If we don't support fpsimd, bail out after we have
	 * reset the fpsimd_cpu for this task and clear the
	 * FPSTATE.
	 */
	if (!system_supports_fpsimd())
		return;
	barrier();
	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);

	barrier();
}
/*
 * Save the FPSIMD state to memory and invalidate cpu view.
 * This function must be called with preemption disabled.
 */
void fpsimd_save_and_flush_cpu_state(void)
{
	unsigned long flags;

	if (!system_supports_fpsimd())
		return;
	WARN_ON(preemptible());
	local_irq_save(flags);
	fpsimd_save_user_state();
	fpsimd_flush_cpu_state();
	local_irq_restore(flags);
}
#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	if (test_thread_flag(TIF_KERNEL_FPSTATE)) {
		BUG_ON(IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq());
		fpsimd_save_kernel_state(current);
	} else {
		fpsimd_save_user_state();

		/*
		 * Set the thread flag so that the kernel mode FPSIMD state
		 * will be context switched along with the rest of the task
		 * state.
		 *
		 * On non-PREEMPT_RT, softirqs may interrupt task level kernel
		 * mode FPSIMD, but the task will not be preemptible so setting
		 * TIF_KERNEL_FPSTATE for those would be both wrong (as it
		 * would mark the task context FPSIMD state as requiring a
		 * context switch) and unnecessary.
		 *
		 * On PREEMPT_RT, softirqs are serviced from a separate thread,
		 * which is scheduled as usual, and this guarantees that these
		 * softirqs are not interrupting use of the FPSIMD in kernel
		 * mode in task context. So in this case, setting the flag here
		 * is always appropriate.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq())
			set_thread_flag(TIF_KERNEL_FPSTATE);
	}

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();

	put_cpu_fpsimd_context();
}
EXPORT_SYMBOL_GPL(kernel_neon_begin);
/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	if (!system_supports_fpsimd())
		return;

	/*
	 * If we are returning from a nested use of kernel mode FPSIMD, restore
	 * the task context kernel mode FPSIMD state. This can only happen when
	 * running in softirq context on non-PREEMPT_RT.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && in_serving_softirq() &&
	    test_thread_flag(TIF_KERNEL_FPSTATE))
		fpsimd_load_kernel_state(current);
	else
		clear_thread_flag(TIF_KERNEL_FPSTATE);
}
EXPORT_SYMBOL_GPL(kernel_neon_end);
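
/*
 * Typical caller of the two functions above (illustrative sketch;
 * do_simd_work()/do_scalar_work() are hypothetical routines):
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		do_simd_work(dst, src, len);	// may clobber FPSIMD regs
 *		kernel_neon_end();
 *	} else {
 *		do_scalar_work(dst, src, len);	// fallback path
 *	}
 *
 * This is the pattern used by the arm64 crypto and RAID code: SIMD is an
 * optional accelerator, and callers must provide a fallback for contexts
 * where may_use_simd() is false.
 */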
#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);
static DEFINE_PER_CPU(bool, efi_sm_state);
/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */
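
/*
 * Usage sketch (illustrative): the arch EFI call glue brackets every
 * runtime services call with the pair below, roughly:
 *
 *	__efi_fpsimd_begin();
 *	status = efi_call(...);
 *	__efi_fpsimd_end();
 */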
/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);
			bool ffr = true;
			u64 svcr;

			__this_cpu_write(efi_sve_state_used, true);

			if (system_supports_sme()) {
				svcr = read_sysreg_s(SYS_SVCR);

				__this_cpu_write(efi_sm_state,
						 svcr & SVCR_SM_MASK);

				/*
				 * Unless we have FA64 FFR does not
				 * exist in streaming mode.
				 */
				if (!system_supports_fa64())
					ffr = !(svcr & SVCR_SM_MASK);
			}

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       ffr);

			if (system_supports_sme())
				sysreg_clear_set_s(SYS_SVCR,
						   SVCR_SM_MASK, 0);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}
/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);
			bool ffr = true;

			/*
			 * Restore streaming mode; EFI calls are
			 * normal function calls so should not return in
			 * streaming mode.
			 */
			if (system_supports_sme()) {
				if (__this_cpu_read(efi_sm_state)) {
					sysreg_clear_set_s(SYS_SVCR,
							   0,
							   SVCR_SM_MASK);

					/*
					 * Unless we have FA64 FFR does not
					 * exist in streaming mode.
					 */
					if (!system_supports_fa64())
						ffr = false;
				}
			}

			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       ffr);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */
#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		fpsimd_save_and_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif
void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__always_unused p)
{
	unsigned long enable = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN;

	write_sysreg(read_sysreg(CPACR_EL1) | enable, CPACR_EL1);
	isb();
}
/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (cpu_have_named_feature(FP)) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!cpu_have_named_feature(ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	sve_sysctl_init();
	sme_sysctl_init();

	return 0;
}
core_initcall(fpsimd_init);