/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2020 SiFive
 */

#ifndef __ASM_RISCV_VECTOR_H
#define __ASM_RISCV_VECTOR_H

#include <linux/types.h>
#include <uapi/asm-generic/errno.h>

#ifdef CONFIG_RISCV_ISA_V

#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/ptrace.h>
#include <asm/cpufeature.h>
#include <asm/csr.h>

extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
bool riscv_v_first_use_handler(struct pt_regs *regs);
void kernel_vector_begin(void);
void kernel_vector_end(void);
void get_cpu_vector_context(void);
void put_cpu_vector_context(void);
void riscv_v_thread_free(struct task_struct *tsk);
void __init riscv_v_setup_ctx_cache(void);
void riscv_v_thread_alloc(struct task_struct *tsk);

static inline u32 riscv_v_flags(void)
{
	return READ_ONCE(current->thread.riscv_v_flags);
}

static __always_inline bool has_vector(void)
{
	return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X);
}

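/*
 * The helpers below rewrite the two-bit sstatus.VS field saved in pt_regs.
 * VS tracks the Vector unit state of the trapped context: Off, Initial,
 * Clean or Dirty. Hardware moves VS to Dirty whenever a vector instruction
 * may have modified vector state, which is what enables the lazy save
 * logic further down.
 */
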
static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_VS) | SR_VS_CLEAN;
}

static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_VS) | SR_VS_DIRTY;
}

static inline void riscv_v_vstate_off(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_VS) | SR_VS_OFF;
}

static inline void riscv_v_vstate_on(struct pt_regs *regs)
{
	regs->status = (regs->status & ~SR_VS) | SR_VS_INITIAL;
}

static inline bool riscv_v_vstate_query(struct pt_regs *regs)
{
	return (regs->status & SR_VS) != 0;
}

static __always_inline void riscv_v_enable(void)
{
	csr_set(CSR_SSTATUS, SR_VS);
}

static __always_inline void riscv_v_disable(void)
{
	csr_clear(CSR_SSTATUS, SR_VS);
}

static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
{
	asm volatile (
		"csrr %0, " __stringify(CSR_VSTART) "\n\t"
		"csrr %1, " __stringify(CSR_VTYPE) "\n\t"
		"csrr %2, " __stringify(CSR_VL) "\n\t"
		"csrr %3, " __stringify(CSR_VCSR) "\n\t"
		"csrr %4, " __stringify(CSR_VLENB) "\n\t"
		: "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl),
		  "=r" (dest->vcsr), "=r" (dest->vlenb) : :);
}

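/*
 * Note for the restore path below: vl and vtype are not writable via csrw;
 * they can only be updated by a vset{i}vl{i} instruction. The saved vl and
 * vtype are therefore reinstated with "vsetvl x0, vl, vtype" before vstart
 * and vcsr are written back directly.
 */
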
static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src)
{
	asm volatile (
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"vsetvl x0, %2, %1\n\t"
		".option pop\n\t"
		"csrw " __stringify(CSR_VSTART) ", %0\n\t"
		"csrw " __stringify(CSR_VCSR) ", %3\n\t"
		: : "r" (src->vstart), "r" (src->vtype), "r" (src->vl),
		    "r" (src->vcsr) :);
}

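/*
 * The two helpers below copy the whole vector register file to and from the
 * buffer at datap: with vsetvli e8/m8 at VLMAX, each vse8.v/vle8.v moves one
 * group of eight registers (vlenb * 8 bytes), so four transfers cover
 * v0-v31. The buffer must therefore be at least riscv_v_vsize bytes.
 */
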
static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
					 void *datap)
{
	unsigned long vl;

	riscv_v_enable();
	__vstate_csr_save(save_to);
	asm volatile (
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"vsetvli %0, x0, e8, m8, ta, ma\n\t"
		"vse8.v v0, (%1)\n\t"
		"add %1, %1, %0\n\t"
		"vse8.v v8, (%1)\n\t"
		"add %1, %1, %0\n\t"
		"vse8.v v16, (%1)\n\t"
		"add %1, %1, %0\n\t"
		"vse8.v v24, (%1)\n\t"
		".option pop\n\t"
		: "=&r" (vl) : "r" (datap) : "memory");
	riscv_v_disable();
}

static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_from,
					    void *datap)
{
	unsigned long vl;

	riscv_v_enable();
	asm volatile (
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"vsetvli %0, x0, e8, m8, ta, ma\n\t"
		"vle8.v v0, (%1)\n\t"
		"add %1, %1, %0\n\t"
		"vle8.v v8, (%1)\n\t"
		"add %1, %1, %0\n\t"
		"vle8.v v16, (%1)\n\t"
		"add %1, %1, %0\n\t"
		"vle8.v v24, (%1)\n\t"
		".option pop\n\t"
		: "=&r" (vl) : "r" (datap) : "memory");
	__vstate_csr_restore(restore_from);
	riscv_v_disable();
}

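/*
 * __riscv_v_vstate_discard() below deliberately clobbers the register file
 * with -1 and then loads an invalid vtype (vtype_inval has only the top
 * bit, the vill bit, set), so no stale vector data from the previous
 * context remains architecturally visible.
 */
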
static inline void __riscv_v_vstate_discard(void)
{
	unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1);

	riscv_v_enable();
	asm volatile (
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"vsetvli %0, x0, e8, m8, ta, ma\n\t"
		"vmv.v.i v0, -1\n\t"
		"vmv.v.i v8, -1\n\t"
		"vmv.v.i v16, -1\n\t"
		"vmv.v.i v24, -1\n\t"
		"vsetvl %0, x0, %1\n\t"
		".option pop\n\t"
		: "=&r" (vl) : "r" (vtype_inval) : "memory");
	riscv_v_disable();
}

static inline void riscv_v_vstate_discard(struct pt_regs *regs)
{
	if ((regs->status & SR_VS) == SR_VS_OFF)
		return;

	__riscv_v_vstate_discard();
	__riscv_v_vstate_dirty(regs);
}

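/*
 * The helpers below implement the lazy handling of user-space vector
 * context: the register file is written back only when sstatus.VS says it
 * is Dirty, and restoring on return to user is deferred by setting
 * TIF_RISCV_V_DEFER_RESTORE while switching VS back to Initial.
 */
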
static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
				       struct pt_regs *regs)
{
	if ((regs->status & SR_VS) == SR_VS_DIRTY) {
		__riscv_v_vstate_save(vstate, vstate->datap);
		__riscv_v_vstate_clean(regs);
	}
}

static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
					  struct pt_regs *regs)
{
	if ((regs->status & SR_VS) != SR_VS_OFF) {
		__riscv_v_vstate_restore(vstate, vstate->datap);
		__riscv_v_vstate_clean(regs);
	}
}

static inline void riscv_v_vstate_set_restore(struct task_struct *task,
					      struct pt_regs *regs)
{
	if ((regs->status & SR_VS) != SR_VS_OFF) {
		set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
		riscv_v_vstate_on(regs);
	}
}

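/*
 * With CONFIG_RISCV_ISA_V_PREEMPTIVE, a task may be preempted while it is
 * using kernel-mode Vector. The RISCV_PREEMPT_V* bits in
 * thread.riscv_v_flags record whether such a kernel vector context exists,
 * is dirty, or still needs to be restored.
 */
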
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
static inline bool riscv_preempt_v_dirty(struct task_struct *task)
{
	return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_DIRTY);
}

static inline bool riscv_preempt_v_restore(struct task_struct *task)
{
	return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_NEED_RESTORE);
}

static inline void riscv_preempt_v_clear_dirty(struct task_struct *task)
{
	/* compiler barrier: keep the flag update ordered with the save */
	barrier();
	task->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_DIRTY;
}

static inline void riscv_preempt_v_set_restore(struct task_struct *task)
{
	barrier();
	task->thread.riscv_v_flags |= RISCV_PREEMPT_V_NEED_RESTORE;
}

static inline bool riscv_preempt_v_started(struct task_struct *task)
{
	return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V);
}

#else /* !CONFIG_RISCV_ISA_V_PREEMPTIVE */
static inline bool riscv_preempt_v_dirty(struct task_struct *task) { return false; }
static inline bool riscv_preempt_v_restore(struct task_struct *task) { return false; }
static inline bool riscv_preempt_v_started(struct task_struct *task) { return false; }
#define riscv_preempt_v_clear_dirty(tsk)	do {} while (0)
#define riscv_preempt_v_set_restore(tsk)	do {} while (0)
#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */

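/*
 * Context-switch handling: if the outgoing task was in a preemptible
 * kernel-mode Vector section, its kernel_vstate is written back when dirty;
 * otherwise its user vector state is saved through its pt_regs. The incoming
 * task is then marked for the matching kernel or user restore path.
 */
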
static inline void __switch_to_vector(struct task_struct *prev,
				      struct task_struct *next)
{
	struct pt_regs *regs;

	if (riscv_preempt_v_started(prev)) {
		if (riscv_preempt_v_dirty(prev)) {
			__riscv_v_vstate_save(&prev->thread.kernel_vstate,
					      prev->thread.kernel_vstate.datap);
			riscv_preempt_v_clear_dirty(prev);
		}
	} else {
		regs = task_pt_regs(prev);
		riscv_v_vstate_save(&prev->thread.vstate, regs);
	}

	if (riscv_preempt_v_started(next))
		riscv_preempt_v_set_restore(next);
	else
		riscv_v_vstate_set_restore(next, task_pt_regs(next));
}

void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
bool riscv_v_vstate_ctrl_user_allowed(void);

#else /* ! CONFIG_RISCV_ISA_V */

struct pt_regs;

static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
static __always_inline bool has_vector(void) { return false; }
static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#define riscv_v_vsize (0)
#define riscv_v_vstate_discard(regs)		do {} while (0)
#define riscv_v_vstate_save(vstate, regs)	do {} while (0)
#define riscv_v_vstate_restore(vstate, regs)	do {} while (0)
#define __switch_to_vector(__prev, __next)	do {} while (0)
#define riscv_v_vstate_off(regs)		do {} while (0)
#define riscv_v_vstate_on(regs)			do {} while (0)
#define riscv_v_thread_free(tsk)		do {} while (0)
#define riscv_v_setup_ctx_cache()		do {} while (0)
#define riscv_v_thread_alloc(tsk)		do {} while (0)

#endif /* CONFIG_RISCV_ISA_V */

/*
 * Return the implementation's vlen value.
 *
 * riscv_v_vsize contains the value of "32 vector registers with vlenb length"
 * so rebuild the vlen value in bits from it.
 */
static inline int riscv_vector_vlen(void)
{
	return riscv_v_vsize / 32 * 8;
}

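/*
 * Worked example with illustrative numbers: on a VLEN = 128 bit
 * implementation, vlenb is 16 bytes, riscv_v_vsize is 32 * 16 = 512 bytes,
 * and riscv_vector_vlen() returns 512 / 32 * 8 = 128.
 */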

#endif /* ! __ASM_RISCV_VECTOR_H */