1 // SPDX-License-Identifier: GPL-2.0
3 * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
5 * Copyright (c) 2018 Google, Inc
7 * Author: Eric Biggers <ebiggers@google.com>
10 #include <linux/linkage.h>
// Register aliases and constant tables shared by the Speck-XTS routines below.
//
// NOTE(review): this excerpt appears to be missing several lines here — the
// X_0..X_3 / Y_0..Y_3 data registers, ROUND_KEY, ROTATE_TABLE, the TMP and
// TWEAKV aliases, and the labels naming the .octa tables (referenced later as
// .Lror\n\()_8_table, .Lrol\n\()_8_table, .Lgf128mul_table, .Lgf64mul_table).
// Confirm against the complete file.
//
// Function arguments (AAPCS64: the first integer/pointer args arrive in x0-x5;
// 32-bit values are accessed through their w views).
15 ROUND_KEYS .req x0 // const {u64,u32} *round_keys
16 NROUNDS .req w1 // int nrounds
18 DST .req x2 // void *dst
19 SRC .req x3 // const void *src
20 NBYTES .req w4 // unsigned int nbytes
21 TWEAK .req x5 // void *tweak
23 // registers which hold the data being encrypted/decrypted
24 // (underscores avoid a naming collision with ARM64 registers x0-x3)
34 // the round key, duplicated in all lanes
37 // index vector for tbl-based 8-bit rotates
// q-register view of the rotate table so it can be loaded with a plain ldr
39 ROTATE_TABLE_Q .req q9
41 // temporary registers
47 // multiplication table for updating XTS tweaks
// q-register view of the GF multiplication table, again for ldr loads
49 GFMUL_TABLE_Q .req q14
51 // next XTS tweak value(s)
54 // XTS tweaks for the blocks currently being encrypted/decrypted
// tbl shuffle-index vectors implementing byte-granular (8-bit) rotates within
// 64-bit or 32-bit lanes.  NOTE(review): which .octa belongs to which of the
// ror64/ror32/rol64/rol32 tables cannot be confirmed from this excerpt since
// the labels are missing.
66 .octa 0x080f0e0d0c0b0a090007060504030201
68 .octa 0x0c0f0e0d080b0a090407060500030201
70 .octa 0x0e0d0c0b0a09080f0605040302010007
72 .octa 0x0e0d0c0f0a09080b0605040702010003
// GF(2^128) tweak-update constants: low lane 1 (multiply by x), high lane 0x87,
// the reduction constant for p(x) = x^128 + x^7 + x^2 + x + 1 (see
// _next_xts_tweak below).
74 .octa 0x00000000000000870000000000000001
// GF(2^64) tweak-update table: reduction constants for multiplying by x^2
// modulo p(x) = x^64 + x^4 + x^3 + x + 1, indexed via tbl on the two
// shifted-out bits (see _next_xts_tweak below).
76 .octa 0x0000000000000000000000002d361b00
79 * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
81 * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
82 * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
83 * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
84 * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
86 .macro _speck_round_128bytes n, lanes
// x = ror(x, 8): an 8-bit rotate is a pure byte permutation, so it is done
// with tbl using the shuffle pattern in ROTATE_TABLE
89 tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
90 tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
91 tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
92 tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
// x += y (lane-wise modular addition)
95 add X_0.\lanes, X_0.\lanes, Y_0.\lanes
96 add X_1.\lanes, X_1.\lanes, Y_1.\lanes
97 add X_2.\lanes, X_2.\lanes, Y_2.\lanes
98 add X_3.\lanes, X_3.\lanes, Y_3.\lanes
// x ^= k (round key is duplicated in all lanes, so a plain 16b eor works)
101 eor X_0.16b, X_0.16b, ROUND_KEY.16b
102 eor X_1.16b, X_1.16b, ROUND_KEY.16b
103 eor X_2.16b, X_2.16b, ROUND_KEY.16b
104 eor X_3.16b, X_3.16b, ROUND_KEY.16b
// y = rol(y, 3): NEON has no per-lane rotate, so build it from a left shift
// (shl #3) merged with the bits shifted out the top (sri #(n-3))
107 shl TMP0.\lanes, Y_0.\lanes, #3
108 shl TMP1.\lanes, Y_1.\lanes, #3
109 shl TMP2.\lanes, Y_2.\lanes, #3
110 shl TMP3.\lanes, Y_3.\lanes, #3
111 sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
112 sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
113 sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
114 sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
// y ^= x
117 eor Y_0.16b, TMP0.16b, X_0.16b
118 eor Y_1.16b, TMP1.16b, X_1.16b
119 eor Y_2.16b, TMP2.16b, X_2.16b
120 eor Y_3.16b, TMP3.16b, X_3.16b
// NOTE(review): the closing .endm is not visible in this excerpt.
124 * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
126 * This is the inverse of _speck_round_128bytes().
128 .macro _speck_unround_128bytes n, lanes
// y ^= x (undo the forward round's final eor; result kept in TMP so the
// original Y values are still available for nothing — X is what's needed next)
131 eor TMP0.16b, Y_0.16b, X_0.16b
132 eor TMP1.16b, Y_1.16b, X_1.16b
133 eor TMP2.16b, Y_2.16b, X_2.16b
134 eor TMP3.16b, Y_3.16b, X_3.16b
// y = ror(y, 3): ushr/sli pair, the exact inverse of the forward round's
// shl/sri rol(y, 3)
137 ushr Y_0.\lanes, TMP0.\lanes, #3
138 ushr Y_1.\lanes, TMP1.\lanes, #3
139 ushr Y_2.\lanes, TMP2.\lanes, #3
140 ushr Y_3.\lanes, TMP3.\lanes, #3
141 sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
142 sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
143 sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
144 sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
// x ^= k (xor with the round key is its own inverse)
147 eor X_0.16b, X_0.16b, ROUND_KEY.16b
148 eor X_1.16b, X_1.16b, ROUND_KEY.16b
149 eor X_2.16b, X_2.16b, ROUND_KEY.16b
150 eor X_3.16b, X_3.16b, ROUND_KEY.16b
// x -= y (lane-wise modular subtraction, inverse of the forward add)
153 sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
154 sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
155 sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
156 sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
// x = rol(x, 8) via tbl — presumably ROTATE_TABLE was loaded from the rol
// table on the decryption path (see the ldr near the top of _speck_xts_crypt);
// confirm against the full file
159 tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
160 tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
161 tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
162 tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
// NOTE(review): the closing .endm is not visible in this excerpt.
165 .macro _next_xts_tweak next, cur, tmp, n
// Compute \next = next XTS tweak(s) from \cur, clobbering \tmp.
// NOTE(review): the .if \n == 64 / .else / .endif directives that select
// between the two computations below are not visible in this excerpt —
// confirm against the full file.
168 * Calculate the next tweak by multiplying the current one by x,
169 * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
// Broadcast each 64-bit half's top (shifted-out) bit across the lane, then
// mask with GFMUL_TABLE so the lanes become 0 or the {1, 0x87} constants
171 sshr \tmp\().2d, \cur\().2d, #63
172 and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
// next = cur << 1 (per 64-bit half; the cross-half carry is fixed up below)
173 shl \next\().2d, \cur\().2d, #1
// Swap the two 64-bit halves so each half's shifted-out bit lands in the
// other half: the low half's carry propagates into the high half, and the
// high half's carry applies the 0x87 reduction to the low half
174 ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
175 eor \next\().16b, \next\().16b, \tmp\().16b
178 * Calculate the next two tweaks by multiplying the current ones by x^2,
179 * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
// The two bits shifted out of each 64-bit tweak index the 4-entry GF(2^64)
// reduction table via tbl
181 ushr \tmp\().2d, \cur\().2d, #62
// next = cur << 2
182 shl \next\().2d, \cur\().2d, #2
183 tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
184 eor \next\().16b, \next\().16b, \tmp\().16b
// NOTE(review): the closing .endif / .endm are not visible in this excerpt.
189 * _speck_xts_crypt() - Speck-XTS encryption/decryption
191 * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
192 * using Speck-XTS, specifically the variant with a block size of '2n' and round
193 * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
194 * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
195 * nonzero multiple of 128.
197 .macro _speck_xts_crypt n, lanes, decrypting
// NOTE(review): many assembler directives belonging to this macro are not
// visible in this excerpt — the .if \decrypting / .if \n == 64 conditionals
// (several pairs of adjacent alternative instruction sequences below are
// presumably the two arms of such conditionals), the .Lnext_128bytes_\@ loop
// label, the per-round loop counter setup around the cipher-round loop, the
// final ret, and the closing .endm.  The comments below describe only what is
// shown; confirm structure against the full file.
200 * If decrypting, modify the ROUND_KEYS parameter to point to the last
201 * round key rather than the first, since for decryption the round keys
202 * are used in reverse order.
// NROUNDS is w1; writing the w view zeroes the upper 32 bits of x1 so the
// 64-bit alias (presumably NROUNDS_X = x1) is safe to use in address math
205 mov NROUNDS, NROUNDS /* zero the high 32 bits */
// Speck128: 8-byte (u64) round keys — advance past all keys, back up to last
207 add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
208 sub ROUND_KEYS, ROUND_KEYS, #8
// Speck64: 4-byte (u32) round keys — same adjustment, half-size entries
210 add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
211 sub ROUND_KEYS, ROUND_KEYS, #4
215 // Load the index vector for tbl-based 8-bit rotates
// (rol table for decryption, ror table for encryption — presumably selected
// by a missing .if \decrypting)
217 ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
219 ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
222 // One-time XTS preparation
// Speck128 variant: load the full 128-bit tweak
225 ld1 {TWEAKV0.16b}, [TWEAK]
227 // Load GF(2^128) multiplication table
228 ldr GFMUL_TABLE_Q, .Lgf128mul_table
// Speck64 variant: the tweak is only 64 bits
231 ld1 {TWEAKV0.8b}, [TWEAK]
233 // Load GF(2^64) multiplication table
234 ldr GFMUL_TABLE_Q, .Lgf64mul_table
236 // Calculate second tweak, packing it together with the first
// (tweak2 = tweak1 * x: shift left by 1, reduce via the tbl lookup on the
// shifted-out bit, then place in the upper 64 bits of TWEAKV0)
237 ushr TMP0.2d, TWEAKV0.2d, #63
238 shl TMP1.2d, TWEAKV0.2d, #1
239 tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
240 eor TMP0.8b, TMP0.8b, TMP1.8b
241 mov TWEAKV0.d[1], TMP0.d[0]
// NOTE(review): the .Lnext_128bytes_\@ loop label (branch target of the bne
// below) belongs here but is not visible in this excerpt.
246 // Calculate XTS tweaks for next 128 bytes
247 _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
248 _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
249 _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
250 _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
251 _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
252 _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
253 _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
254 _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
256 // Load the next source blocks into {X,Y}[0-3]
// (post-indexed ld1: SRC advances by 64 bytes per load, 128 bytes total)
257 ld1 {X_0.16b-Y_1.16b}, [SRC], #64
258 ld1 {X_2.16b-Y_3.16b}, [SRC], #64
260 // XOR the source blocks with their XTS tweaks
// (results for the X halves go to TMP so the uzp below can read both inputs)
261 eor TMP0.16b, X_0.16b, TWEAKV0.16b
262 eor Y_0.16b, Y_0.16b, TWEAKV1.16b
263 eor TMP1.16b, X_1.16b, TWEAKV2.16b
264 eor Y_1.16b, Y_1.16b, TWEAKV3.16b
265 eor TMP2.16b, X_2.16b, TWEAKV4.16b
266 eor Y_2.16b, Y_2.16b, TWEAKV5.16b
267 eor TMP3.16b, X_3.16b, TWEAKV6.16b
268 eor Y_3.16b, Y_3.16b, TWEAKV7.16b
271 * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
272 * that the X[0-3] registers contain only the second halves of blocks,
273 * and the Y[0-3] registers contain only the first halves of blocks.
274 * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
276 uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
277 uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
278 uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
279 uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
280 uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
281 uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
282 uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
283 uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
285 // Do the cipher rounds
// NOTE(review): the round-loop setup (x6 = key pointer, loop counter) and the
// surrounding .if \decrypting / loop-branch directives are not visible here.
// Decryption: broadcast-load the round key, then step the pointer BACKWARD
// (keys are consumed last-to-first)
290 ld1r {ROUND_KEY.\lanes}, [x6]
291 sub x6, x6, #( \n / 8 )
292 _speck_unround_128bytes \n, \lanes
// Encryption: broadcast-load with post-increment — keys consumed first-to-last
294 ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
295 _speck_round_128bytes \n, \lanes
300 // Re-interleave the 'x' and 'y' elements of each block
301 zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
302 zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
303 zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
304 zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
305 zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
306 zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
307 zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
308 zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
310 // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
311 eor X_0.16b, TMP0.16b, TWEAKV0.16b
312 eor Y_0.16b, Y_0.16b, TWEAKV1.16b
313 eor X_1.16b, TMP1.16b, TWEAKV2.16b
314 eor Y_1.16b, Y_1.16b, TWEAKV3.16b
315 eor X_2.16b, TMP2.16b, TWEAKV4.16b
316 eor Y_2.16b, Y_2.16b, TWEAKV5.16b
317 eor X_3.16b, TMP3.16b, TWEAKV6.16b
318 eor Y_3.16b, Y_3.16b, TWEAKV7.16b
// Carry the ninth tweak forward as the first tweak of the next 128-byte chunk
319 mov TWEAKV0.16b, TWEAKV_NEXT.16b
321 // Store the ciphertext in the destination buffer
322 st1 {X_0.16b-Y_1.16b}, [DST], #64
323 st1 {X_2.16b-Y_3.16b}, [DST], #64
325 // Continue if there are more 128-byte chunks remaining
// (subs sets flags; loop while NBYTES has not reached zero — NBYTES is
// assumed to be a nonzero multiple of 128, per the header comment)
326 subs NBYTES, NBYTES, #128
327 bne .Lnext_128bytes_\@
329 // Store the next tweak and return
// 128-bit store for Speck128, 64-bit for Speck64 (presumably a missing
// .if \n == 64 selects between them); ret and .endm are also not visible
331 st1 {TWEAKV_NEXT.16b}, [TWEAK]
333 st1 {TWEAKV_NEXT.8b}, [TWEAK]
// void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
//				  void *dst, const void *src,
//				  unsigned int nbytes, void *tweak)
// Speck128-XTS encryption: 64-bit lanes ("2d"), round keys walked forward.
338 ENTRY(speck128_xts_encrypt_neon)
339 _speck_xts_crypt n=64, lanes=2d, decrypting=0
340 ENDPROC(speck128_xts_encrypt_neon)
// void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
//				  void *dst, const void *src,
//				  unsigned int nbytes, void *tweak)
// Speck128-XTS decryption: 64-bit lanes ("2d"), round keys walked in reverse.
342 ENTRY(speck128_xts_decrypt_neon)
343 _speck_xts_crypt n=64, lanes=2d, decrypting=1
344 ENDPROC(speck128_xts_decrypt_neon)
// void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
//				 void *dst, const void *src,
//				 unsigned int nbytes, void *tweak)
// Speck64-XTS encryption: 32-bit lanes ("4s"), round keys walked forward.
346 ENTRY(speck64_xts_encrypt_neon)
347 _speck_xts_crypt n=32, lanes=4s, decrypting=0
348 ENDPROC(speck64_xts_encrypt_neon)
// void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
//				 void *dst, const void *src,
//				 unsigned int nbytes, void *tweak)
// Speck64-XTS decryption: 32-bit lanes ("4s"), round keys walked in reverse.
350 ENTRY(speck64_xts_decrypt_neon)
351 _speck_xts_crypt n=32, lanes=4s, decrypting=1
352 ENDPROC(speck64_xts_decrypt_neon)