arch/arm64/include/asm/fpsimdmacros.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * FP/SIMD state saving and restoring macros
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

.macro fpsimd_save state, tmpnr
	stp	q0, q1, [\state, #16 * 0]
	stp	q2, q3, [\state, #16 * 2]
	stp	q4, q5, [\state, #16 * 4]
	stp	q6, q7, [\state, #16 * 6]
	stp	q8, q9, [\state, #16 * 8]
	stp	q10, q11, [\state, #16 * 10]
	stp	q12, q13, [\state, #16 * 12]
	stp	q14, q15, [\state, #16 * 14]
	stp	q16, q17, [\state, #16 * 16]
	stp	q18, q19, [\state, #16 * 18]
	stp	q20, q21, [\state, #16 * 20]
	stp	q22, q23, [\state, #16 * 22]
	stp	q24, q25, [\state, #16 * 24]
	stp	q26, q27, [\state, #16 * 26]
	stp	q28, q29, [\state, #16 * 28]
	stp	q30, q31, [\state, #16 * 30]!
	mrs	x\tmpnr, fpsr
	str	w\tmpnr, [\state, #16 * 2]
	mrs	x\tmpnr, fpcr
	str	w\tmpnr, [\state, #16 * 2 + 4]
.endm

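/*
 * Example (illustrative): with x0 pointing at a buffer laid out as 32
 * 128-bit vregs followed by 32-bit fpsr and fpcr, and a bare register
 * number passed for the scratch register:
 *
 *	fpsimd_save x0, 8		// uses x8/w8 as scratch
 *
 * The final stp writes back \state += 16 * 30, so fpsr and fpcr land
 * at offsets 16 * 32 and 16 * 32 + 4 from the original base; \state is
 * clobbered as a side effect.
 */
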
.macro fpsimd_restore_fpcr state, tmp
	/*
	 * Writes to fpcr may be self-synchronising, so avoid restoring
	 * the register if it hasn't changed.
	 */
	mrs	\tmp, fpcr
	cmp	\tmp, \state
	b.eq	9999f
	msr	fpcr, \state
9999:
.endm

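/*
 * Illustrative use: \state here is an x register holding the saved
 * fpcr value, not a pointer, e.g.:
 *
 *	fpsimd_restore_fpcr x2, x3	// restore fpcr from x2, x3 is scratch
 *
 * The 9999 local label is deliberately obscure so that expansions do
 * not collide with numeric labels in surrounding code.
 */
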
/* Clobbers \state */
.macro fpsimd_restore state, tmpnr
	ldp	q0, q1, [\state, #16 * 0]
	ldp	q2, q3, [\state, #16 * 2]
	ldp	q4, q5, [\state, #16 * 4]
	ldp	q6, q7, [\state, #16 * 6]
	ldp	q8, q9, [\state, #16 * 8]
	ldp	q10, q11, [\state, #16 * 10]
	ldp	q12, q13, [\state, #16 * 12]
	ldp	q14, q15, [\state, #16 * 14]
	ldp	q16, q17, [\state, #16 * 16]
	ldp	q18, q19, [\state, #16 * 18]
	ldp	q20, q21, [\state, #16 * 20]
	ldp	q22, q23, [\state, #16 * 22]
	ldp	q24, q25, [\state, #16 * 24]
	ldp	q26, q27, [\state, #16 * 26]
	ldp	q28, q29, [\state, #16 * 28]
	ldp	q30, q31, [\state, #16 * 30]!
	ldr	w\tmpnr, [\state, #16 * 2]
	msr	fpsr, x\tmpnr
	ldr	w\tmpnr, [\state, #16 * 2 + 4]
	fpsimd_restore_fpcr x\tmpnr, \state
.endm

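/*
 * Example (illustrative): the mirror image of fpsimd_save above:
 *
 *	fpsimd_restore x0, 8		// reload from buffer at x0, x8/w8 scratch
 *
 * After the writeback ldp, \state is no longer needed, which is why it
 * can be reused as the scratch operand of fpsimd_restore_fpcr (and why
 * the macro is documented as clobbering \state).
 */
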
/* Sanity-check macros to help avoid encoding garbage instructions */

.macro _check_general_reg nr
	.if (\nr) < 0 || (\nr) > 30
		.error "Bad register number \nr."
	.endif
.endm

.macro _sve_check_zreg znr
	.if (\znr) < 0 || (\znr) > 31
		.error "Bad Scalable Vector Extension vector register number \znr."
	.endif
.endm

.macro _sve_check_preg pnr
	.if (\pnr) < 0 || (\pnr) > 15
		.error "Bad Scalable Vector Extension predicate register number \pnr."
	.endif
.endm

.macro _check_num n, min, max
	.if (\n) < (\min) || (\n) > (\max)
		.error "Number \n out of range [\min,\max]"
	.endif
.endm

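/*
 * These checks fire at assembly time rather than producing a bogus
 * encoding, e.g. (illustrative):
 *
 *	_sve_check_preg 16	// Error: Bad Scalable Vector Extension
 *				// predicate register number 16.
 */
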
/* SVE instruction encodings for non-SVE-capable assemblers */

/* STR (vector): STR Z\nz, [X\nxbase, #\offset, MUL VL] */
.macro _sve_str_v nz, nxbase, offset=0
	_sve_check_zreg \nz
	_check_general_reg \nxbase
	_check_num (\offset), -0x100, 0xff
	.inst	0xe5804000			\
		| (\nz)				\
		| ((\nxbase) << 5)		\
		| (((\offset) & 7) << 10)	\
		| (((\offset) & 0x1f8) << 13)
.endm

/* LDR (vector): LDR Z\nz, [X\nxbase, #\offset, MUL VL] */
.macro _sve_ldr_v nz, nxbase, offset=0
	_sve_check_zreg \nz
	_check_general_reg \nxbase
	_check_num (\offset), -0x100, 0xff
	.inst	0x85804000			\
		| (\nz)				\
		| ((\nxbase) << 5)		\
		| (((\offset) & 7) << 10)	\
		| (((\offset) & 0x1f8) << 13)
.endm

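/*
 * Worked example (illustrative): _sve_str_v 1, 2, 3 packs Zt = 1 into
 * bits [4:0], Rn = 2 into bits [9:5], and splits the signed imm9
 * offset 3 into imm9l (bits [12:10]) and imm9h (bits [21:16]):
 *
 *	0xe5804000 | 1 | (2 << 5) | (3 << 10)	-> .inst 0xe5804c41
 *
 * which an SVE-capable assembler would accept as
 * STR Z1, [X2, #3, MUL VL]. Negative offsets work because the
 * & 7 / & 0x1f8 masks select the low nine bits of the two's-complement
 * value.
 */
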
/* STR (predicate): STR P\np, [X\nxbase, #\offset, MUL VL] */
.macro _sve_str_p np, nxbase, offset=0
	_sve_check_preg \np
	_check_general_reg \nxbase
	_check_num (\offset), -0x100, 0xff
	.inst	0xe5800000			\
		| (\np)				\
		| ((\nxbase) << 5)		\
		| (((\offset) & 7) << 10)	\
		| (((\offset) & 0x1f8) << 13)
.endm

/* LDR (predicate): LDR P\np, [X\nxbase, #\offset, MUL VL] */
.macro _sve_ldr_p np, nxbase, offset=0
	_sve_check_preg \np
	_check_general_reg \nxbase
	_check_num (\offset), -0x100, 0xff
	.inst	0x85800000			\
		| (\np)				\
		| ((\nxbase) << 5)		\
		| (((\offset) & 7) << 10)	\
		| (((\offset) & 0x1f8) << 13)
.endm

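/*
 * The predicate forms use the same imm9 split; the offset is scaled by
 * the predicate length (VL / 8 bytes) rather than VL, e.g.
 * (illustrative):
 *
 *	_sve_str_p 0, 1, -2	// STR P0, [X1, #-2, MUL VL]
 */
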
/* RDVL X\nx, #\imm */
.macro _sve_rdvl nx, imm
	_check_general_reg \nx
	_check_num (\imm), -0x20, 0x1f
	.inst	0x04bf5000		\
		| (\nx)			\
		| (((\imm) & 0x3f) << 5)
.endm

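/*
 * RDVL materialises \imm multiplied by the current vector length in
 * bytes, e.g. (illustrative):
 *
 *	_sve_rdvl 0, 1		// x0 = VL in bytes
 */
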
/* RDFFR (unpredicated): RDFFR P\np.B */
.macro _sve_rdffr np
	_sve_check_preg \np
	.inst	0x2519f000	\
		| (\np)
.endm

/* WRFFR P\np.B */
.macro _sve_wrffr np
	_sve_check_preg \np
	.inst	0x25289000	\
		| ((\np) << 5)
.endm

/* PFALSE P\np.B */
.macro _sve_pfalse np
	_sve_check_preg \np
	.inst	0x2518e400	\
		| (\np)
.endm

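/*
 * Together these give a way to clear FFR without a dedicated
 * encoding, as sve_flush below does (illustrative):
 *
 *	_sve_pfalse 0		// p0.b = all false
 *	_sve_wrffr 0		// FFR = p0
 */
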
.macro __for from:req, to:req
	.if (\from) == (\to)
		_for__body %\from
	.else
		__for %\from, %((\from) + ((\to) - (\from)) / 2)
		__for %((\from) + ((\to) - (\from)) / 2 + 1), %\to
	.endif
.endm

.macro _for var:req, from:req, to:req, insn:vararg
	.macro _for__body \var:req
		.noaltmacro
		\insn
		.altmacro
	.endm

	.altmacro
	__for \from, \to
	.noaltmacro

	.purgem _for__body
.endm

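/*
 * Illustrative expansion: _for n, 0, 3, _sve_pfalse \n recurses
 * through __for, halving the [from, to] range until single values
 * remain, and emits _sve_pfalse 0 .. _sve_pfalse 3. The %\from
 * expressions rely on .altmacro so the arithmetic is evaluated to a
 * plain number before _for__body substitutes it into \insn;
 * .noaltmacro is restored around \insn so the payload assembles in the
 * default mode.
 */
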
/* Update ZCR_EL1.LEN with the new VQ */
.macro sve_load_vq xvqminus1, xtmp, xtmp2
	mrs_s	\xtmp, SYS_ZCR_EL1
	bic	\xtmp2, \xtmp, ZCR_ELx_LEN_MASK
	orr	\xtmp2, \xtmp2, \xvqminus1
	cmp	\xtmp2, \xtmp
	b.eq	921f
	msr_s	SYS_ZCR_EL1, \xtmp2	// self-synchronising
921:
.endm

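/*
 * Example (illustrative): \xvqminus1 is the new value of the LEN
 * field, i.e. the number of 128-bit quadwords minus one:
 *
 *	mov	x0, #(2 - 1)	// request VL = 256 bits (VQ = 2)
 *	sve_load_vq x0, x1, x2
 *
 * The write to ZCR_EL1 is skipped when LEN already matches, since the
 * self-synchronising msr is potentially expensive.
 */
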
/* Preserve the first 128-bits of Znz and zero the rest. */
.macro _sve_flush_z nz
	_sve_check_zreg \nz
	mov	v\nz\().16b, v\nz\().16b
.endm

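/*
 * The self-move above works because, when SVE is implemented, a write
 * to the 128-bit AdvSIMD view of a vector register zeroes the
 * remaining high bits of the corresponding Z register, leaving only
 * the FPSIMD-visible state behind.
 */
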
.macro sve_flush
	_for	n, 0, 31, _sve_flush_z \n
	_for	n, 0, 15, _sve_pfalse \n
	_sve_wrffr	0
.endm

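/*
 * Net effect of sve_flush: every Z register keeps only its low 128
 * (FPSIMD) bits, all predicate registers become all-false, and FFR is
 * cleared, discarding the SVE-only state while preserving the V0-V31
 * view.
 */
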
.macro sve_save nxbase, xpfpsr, nxtmp
	_for	n, 0, 31, _sve_str_v \n, \nxbase, \n - 34
	_for	n, 0, 15, _sve_str_p \n, \nxbase, \n - 16
	_sve_rdffr	0
	_sve_str_p	0, \nxbase
	_sve_ldr_p	0, \nxbase, -16

	mrs	x\nxtmp, fpsr
	str	w\nxtmp, [\xpfpsr]
	mrs	x\nxtmp, fpcr
	str	w\nxtmp, [\xpfpsr, #4]
.endm

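/*
 * A sketch of the buffer layout this implies, assuming \nxbase is the
 * register number holding the address at which FFR is stored (VL =
 * vector length in bytes), e.g. sve_save 0, x1, 2:
 *
 *	base - 34 * VL:	Z0 .. Z31	(32 * VL bytes)
 *	base -  2 * VL:	P0 .. P15	(16 * VL / 8 bytes)
 *	base:		FFR		(VL / 8 bytes)
 *
 * FFR has no direct load/store, so it is staged through P0, whose real
 * contents are reloaded from the buffer afterwards.
 */
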
.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
	sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
	_for	n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
	_sve_ldr_p	0, \nxbase
	_sve_wrffr	0
	_for	n, 0, 15, _sve_ldr_p \n, \nxbase, \n - 16

	ldr	w\nxtmp, [\xpfpsr]
	msr	fpsr, x\nxtmp
	ldr	w\nxtmp, [\xpfpsr, #4]
	msr	fpcr, x\nxtmp
.endm

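/*
 * Example (illustrative): reload state saved by sve_save above, with
 * the FFR address in x0, the saved fpsr/fpcr pair at x1, and the
 * target VQ - 1 in x2:
 *
 *	sve_load 0, x1, x2, 3, x4	// x3/w3 and x4 are scratch
 *
 * The vector length is programmed first so that the loads see the
 * right VL; FFR is then rebuilt by loading its saved image into P0 and
 * issuing WRFFR, before P0-P15 themselves are reloaded.
 */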