// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 * Copyright (C) 2021 SiFive
 */
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

#include <asm/vector.h>
#include <asm/switch_to.h>
#include <asm/simd.h>

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
#include <asm/asm-prototypes.h>
#endif
21 static inline void riscv_v_flags_set(u32 flags
)
23 WRITE_ONCE(current
->thread
.riscv_v_flags
, flags
);
26 static inline void riscv_v_start(u32 flags
)
30 orig
= riscv_v_flags();
31 BUG_ON((orig
& flags
) != 0);
32 riscv_v_flags_set(orig
| flags
);
36 static inline void riscv_v_stop(u32 flags
)
41 orig
= riscv_v_flags();
42 BUG_ON((orig
& flags
) == 0);
43 riscv_v_flags_set(orig
& ~flags
);
47 * Claim ownership of the CPU vector context for use by the calling context.
49 * The caller may freely manipulate the vector context metadata until
50 * put_cpu_vector_context() is called.
52 void get_cpu_vector_context(void)
55 * disable softirqs so it is impossible for softirqs to nest
56 * get_cpu_vector_context() when kernel is actively using Vector.
58 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
63 riscv_v_start(RISCV_KERNEL_MODE_V
);
67 * Release the CPU vector context.
69 * Must be called from a context in which get_cpu_vector_context() was
70 * previously called, with no call to put_cpu_vector_context() in the
73 void put_cpu_vector_context(void)
75 riscv_v_stop(RISCV_KERNEL_MODE_V
);
77 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
83 #ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
84 static __always_inline u32
*riscv_v_flags_ptr(void)
86 return ¤t
->thread
.riscv_v_flags
;
89 static inline void riscv_preempt_v_set_dirty(void)
91 *riscv_v_flags_ptr() |= RISCV_PREEMPT_V_DIRTY
;
94 static inline void riscv_preempt_v_reset_flags(void)
96 *riscv_v_flags_ptr() &= ~(RISCV_PREEMPT_V_DIRTY
| RISCV_PREEMPT_V_NEED_RESTORE
);
99 static inline void riscv_v_ctx_depth_inc(void)
101 *riscv_v_flags_ptr() += RISCV_V_CTX_UNIT_DEPTH
;
104 static inline void riscv_v_ctx_depth_dec(void)
106 *riscv_v_flags_ptr() -= RISCV_V_CTX_UNIT_DEPTH
;
109 static inline u32
riscv_v_ctx_get_depth(void)
111 return *riscv_v_flags_ptr() & RISCV_V_CTX_DEPTH_MASK
;
114 static int riscv_v_stop_kernel_context(void)
116 if (riscv_v_ctx_get_depth() != 0 || !riscv_preempt_v_started(current
))
119 riscv_preempt_v_clear_dirty(current
);
120 riscv_v_stop(RISCV_PREEMPT_V
);
124 static int riscv_v_start_kernel_context(bool *is_nested
)
126 struct __riscv_v_ext_state
*kvstate
, *uvstate
;
128 kvstate
= ¤t
->thread
.kernel_vstate
;
132 if (riscv_preempt_v_started(current
)) {
133 WARN_ON(riscv_v_ctx_get_depth() == 0);
135 get_cpu_vector_context();
136 if (riscv_preempt_v_dirty(current
)) {
137 __riscv_v_vstate_save(kvstate
, kvstate
->datap
);
138 riscv_preempt_v_clear_dirty(current
);
140 riscv_preempt_v_set_restore(current
);
144 /* Transfer the ownership of V from user to kernel, then save */
145 riscv_v_start(RISCV_PREEMPT_V
| RISCV_PREEMPT_V_DIRTY
);
146 if ((task_pt_regs(current
)->status
& SR_VS
) == SR_VS_DIRTY
) {
147 uvstate
= ¤t
->thread
.vstate
;
148 __riscv_v_vstate_save(uvstate
, uvstate
->datap
);
150 riscv_preempt_v_clear_dirty(current
);
154 /* low-level V context handling code, called with irq disabled */
155 asmlinkage
void riscv_v_context_nesting_start(struct pt_regs
*regs
)
159 if (!riscv_preempt_v_started(current
))
162 depth
= riscv_v_ctx_get_depth();
163 if (depth
== 0 && (regs
->status
& SR_VS
) == SR_VS_DIRTY
)
164 riscv_preempt_v_set_dirty();
166 riscv_v_ctx_depth_inc();
169 asmlinkage
void riscv_v_context_nesting_end(struct pt_regs
*regs
)
171 struct __riscv_v_ext_state
*vstate
= ¤t
->thread
.kernel_vstate
;
174 WARN_ON(!irqs_disabled());
176 if (!riscv_preempt_v_started(current
))
179 riscv_v_ctx_depth_dec();
180 depth
= riscv_v_ctx_get_depth();
182 if (riscv_preempt_v_restore(current
)) {
183 __riscv_v_vstate_restore(vstate
, vstate
->datap
);
184 __riscv_v_vstate_clean(regs
);
185 riscv_preempt_v_reset_flags();
190 #define riscv_v_start_kernel_context(nested) (-ENOENT)
191 #define riscv_v_stop_kernel_context() (-ENOENT)
192 #endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
195 * kernel_vector_begin(): obtain the CPU vector registers for use by the calling
198 * Must not be called unless may_use_simd() returns true.
199 * Task context in the vector registers is saved back to memory as necessary.
201 * A matching call to kernel_vector_end() must be made before returning from the
204 * The caller may freely use the vector registers until kernel_vector_end() is
207 void kernel_vector_begin(void)
211 if (WARN_ON(!has_vector()))
214 BUG_ON(!may_use_simd());
216 if (riscv_v_start_kernel_context(&nested
)) {
217 get_cpu_vector_context();
218 riscv_v_vstate_save(¤t
->thread
.vstate
, task_pt_regs(current
));
222 riscv_v_vstate_set_restore(current
, task_pt_regs(current
));
226 EXPORT_SYMBOL_GPL(kernel_vector_begin
);
/*
 * kernel_vector_end(): give the CPU vector registers back to the current task
 *
 * Must be called from a context in which kernel_vector_begin() was previously
 * called, with no call to kernel_vector_end() in the meantime.
 *
 * The caller must not use the vector registers after this function is called,
 * unless kernel_vector_begin() is called again in the meantime.
 */
void kernel_vector_end(void)
{
	if (WARN_ON(!has_vector()))
		return;

	riscv_v_disable();

	/* Non-zero return: ownership was taken via get_cpu_vector_context(). */
	if (riscv_v_stop_kernel_context())
		put_cpu_vector_context();
}
EXPORT_SYMBOL_GPL(kernel_vector_end);