/*
 * ChaCha20 256-bit cipher algorithm, RFC7539, ARM NEON functions
 *
 * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Based on:
 * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSE3 functions
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
/*
 * NEON doesn't have a rotate instruction. The alternatives are, more or less:
 *
 * (a)  vshl.u32 + vsri.u32		(needs temporary register)
 * (b)  vshl.u32 + vshr.u32 + vorr	(needs temporary register)
 * (c)  vrev32.16			(16-bit rotations only)
 * (d)  vtbl.8 + vtbl.8			(multiple of 8 bits rotations only,
 *					 needs index vector)
 *
 * ChaCha20 has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit
 * rotations, the only choices are (a) and (b). We use (a) since it takes
 * two-thirds the cycles of (b) on both Cortex-A7 and Cortex-A53.
 *
 * For the 16-bit rotation, we use vrev32.16 since it's consistently fastest
 * and doesn't need a temporary register.
 *
 * For the 8-bit rotation, we use vtbl.8 + vtbl.8. On Cortex-A7, this sequence
 * is twice as fast as (a), even when doing (a) on multiple registers
 * simultaneously to eliminate the stall between vshl and vsri. Also, it
 * parallelizes better when temporary registers are scarce.
 *
 * A disadvantage is that on Cortex-A53, the vtbl sequence is the same speed as
 * (a), so the need to load the rotation table actually makes the vtbl method
 * slightly slower overall on that CPU (~1.3% slower ChaCha20). Still, it
 * seems to be a good compromise to get a more significant speed boost on some
 * CPUs, e.g. ~4.8% faster ChaCha20 on Cortex-A7.
 */
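
/*
 * For illustration only (not part of the code below): with method (a),
 * rotating every 32-bit lane of q4 left by 12 could look like this, assuming
 * q8 is free to use as the destination/scratch register. vshl produces the
 * bits shifted up, and vsri shifts right and inserts, filling in the low bits
 * while preserving the high bits that vshl already wrote:
 *
 *	vshl.u32	q8, q4, #12	// q8 = x << 12
 *	vsri.u32	q8, q4, #20	// q8 |= x >> 20, i.e. q8 = rotl32(x, 12)
 */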
#include <linux/linkage.h>
ENTRY(chacha20_block_xor_neon)
	// r0: Input state matrix, s
	// r1: 1 data block output, o
	// r2: 1 data block input, i

	//
	// This function encrypts one ChaCha20 block by loading the state
	// matrix into four NEON registers. It performs matrix operations on
	// four words in parallel, but requires shuffling to rearrange the
	// words after each round.
	//
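
	// For reference, the ChaCha20 quarter-round from RFC 7539 on words
	// (a, b, c, d) is:
	//
	//	a += b;  d ^= a;  d = rotl32(d, 16);
	//	c += d;  b ^= c;  b = rotl32(b, 12);
	//	a += b;  d ^= a;  d = rotl32(d,  8);
	//	c += d;  b ^= c;  b = rotl32(b,  7);
	//
	// With the four rows of the state in q0-q3, each group of instructions
	// below applies this to all four columns (or, after the shuffles, all
	// four diagonals) at once.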
	vld1.8		{d10}, [ip, :64]	// d10 = byte-permutation table for the 8-bit rotations
	// x0 += x1, x3 = rotl32(x3 ^ x0, 16)

	// x2 += x3, x1 = rotl32(x1 ^ x2, 12)

	// x0 += x1, x3 = rotl32(x3 ^ x0, 8)

	// x2 += x3, x1 = rotl32(x1 ^ x2, 7)

	// x1 = shuffle32(x1, MASK(0, 3, 2, 1))
	vext.8		q1, q1, q1, #4
	// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
	vext.8		q2, q2, q2, #8
	// x3 = shuffle32(x3, MASK(2, 1, 0, 3))
	vext.8		q3, q3, q3, #12
	// x0 += x1, x3 = rotl32(x3 ^ x0, 16)

	// x2 += x3, x1 = rotl32(x1 ^ x2, 12)

	// x0 += x1, x3 = rotl32(x3 ^ x0, 8)

	// x2 += x3, x1 = rotl32(x1 ^ x2, 7)

	// x1 = shuffle32(x1, MASK(2, 1, 0, 3))
	vext.8		q1, q1, q1, #12
	// x2 = shuffle32(x2, MASK(1, 0, 3, 2))
	vext.8		q2, q2, q2, #8
	// x3 = shuffle32(x3, MASK(0, 3, 2, 1))
	vext.8		q3, q3, q3, #4
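
	// Note: the first set of vext rotations (after the column
	// quarter-rounds) moved each diagonal of the state into a column; the
	// second set above rotates the words back into their original row
	// positions, completing one double round.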
	// o0 = i0 ^ (x0 + s0)

	// o1 = i1 ^ (x1 + s1)

	// o2 = i2 ^ (x2 + s2)

	// o3 = i3 ^ (x3 + s3)

ENDPROC(chacha20_block_xor_neon)
.Lctrinc:	.word	0, 1, 2, 3
.Lrol8_table:	.byte	3, 0, 1, 2, 7, 4, 5, 6
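
	// .Lctrinc holds the per-block counter increments 0-3 that the 4-block
	// routine adds to x12, so the four blocks get consecutive counters.
	//
	// .Lrol8_table is the vtbl.8 index vector for an 8-bit left rotation of
	// each 32-bit word: output byte i is taken from input byte table[i].
	// For a little-endian word with bytes (b0 b1 b2 b3), rotl32 by 8 yields
	// (b3 b0 b1 b2), hence the indices 3, 0, 1, 2 (and 7, 4, 5, 6 for the
	// second word held in the same d register).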
ENTRY(chacha20_4block_xor_neon)
	mov		r4, sp			// preserve the stack pointer
	sub		ip, sp, #0x20		// allocate a 32 byte buffer
	bic		ip, ip, #0x1f		// aligned to 32 bytes

	// r0: Input state matrix, s
	// r1: 4 data blocks output, o
	// r2: 4 data blocks input, i

	//
	// This function encrypts four consecutive ChaCha20 blocks by loading
	// the state matrix into NEON registers four times. The algorithm
	// performs each operation on the corresponding word of each state
	// matrix, hence requires no word shuffling. The words are
	// re-interleaved before the final addition of the original state and
	// the XORing step.
	//
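
	// Data layout note: in the comments below, xN[0-3] means ChaCha word N
	// of blocks 0-3. Each such group of four words occupies one q register
	// (x8..9[0-3] are spilled to the 32-byte stack buffer when registers
	// run out), so a single NEON instruction advances all four blocks at
	// once.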
	// x0..15[0-3] = s0..15[0-3]
	vld1.32		{q0-q1}, [r0]
	vld1.32		{q2-q3}, [ip]		// ip = s + 0x20 (second half of the state)

	vld1.32		{q4}, [r5, :128]	// r5 points at .Lctrinc: counter increments 0-3
	vadd.u32	q12, q12, q4		// x12 += counter values 0-3

	vld1.32		{q8-q9}, [sp, :256]	// reload x8..9[0-3] from the stack buffer
	// x0 += x4, x12 = rotl32(x12 ^ x0, 16)
	// x1 += x5, x13 = rotl32(x13 ^ x1, 16)
	// x2 += x6, x14 = rotl32(x14 ^ x2, 16)
	// x3 += x7, x15 = rotl32(x15 ^ x3, 16)

	// x8 += x12, x4 = rotl32(x4 ^ x8, 12)
	// x9 += x13, x5 = rotl32(x5 ^ x9, 12)
	// x10 += x14, x6 = rotl32(x6 ^ x10, 12)
	// x11 += x15, x7 = rotl32(x7 ^ x11, 12)
	vadd.i32	q10, q10, q14
	vadd.i32	q11, q11, q15

	vst1.32		{q8-q9}, [sp, :256]	// save x8..9[0-3] back to the stack buffer
	// x0 += x4, x12 = rotl32(x12 ^ x0, 8)
	// x1 += x5, x13 = rotl32(x13 ^ x1, 8)
	// x2 += x6, x14 = rotl32(x14 ^ x2, 8)
	// x3 += x7, x15 = rotl32(x15 ^ x3, 8)
	vld1.8		{d16}, [ip, :64]	// d16 = rol8 byte-permutation table

	vtbl.8		d24, {d24}, d16
	vtbl.8		d25, {d25}, d16
	vtbl.8		d26, {d26}, d16
	vtbl.8		d27, {d27}, d16
	vtbl.8		d28, {d28}, d16
	vtbl.8		d29, {d29}, d16
	vtbl.8		d30, {d30}, d16
	vtbl.8		d31, {d31}, d16
	vld1.32		{q8-q9}, [sp, :256]

	// x8 += x12, x4 = rotl32(x4 ^ x8, 7)
	// x9 += x13, x5 = rotl32(x5 ^ x9, 7)
	// x10 += x14, x6 = rotl32(x6 ^ x10, 7)
	// x11 += x15, x7 = rotl32(x7 ^ x11, 7)
	vadd.i32	q10, q10, q14
	vadd.i32	q11, q11, q15

	vst1.32		{q8-q9}, [sp, :256]
	vld1.32		{q8-q9}, [sp, :256]

	// x0 += x5, x15 = rotl32(x15 ^ x0, 16)
	// x1 += x6, x12 = rotl32(x12 ^ x1, 16)
	// x2 += x7, x13 = rotl32(x13 ^ x2, 16)
	// x3 += x4, x14 = rotl32(x14 ^ x3, 16)

	// x10 += x15, x5 = rotl32(x5 ^ x10, 12)
	// x11 += x12, x6 = rotl32(x6 ^ x11, 12)
	// x8 += x13, x7 = rotl32(x7 ^ x8, 12)
	// x9 += x14, x4 = rotl32(x4 ^ x9, 12)
	vadd.i32	q10, q10, q15
	vadd.i32	q11, q11, q12

	vst1.32		{q8-q9}, [sp, :256]
	// x0 += x5, x15 = rotl32(x15 ^ x0, 8)
	// x1 += x6, x12 = rotl32(x12 ^ x1, 8)
	// x2 += x7, x13 = rotl32(x13 ^ x2, 8)
	// x3 += x4, x14 = rotl32(x14 ^ x3, 8)
	vld1.8		{d16}, [ip, :64]

	vtbl.8		d30, {d30}, d16
	vtbl.8		d31, {d31}, d16
	vtbl.8		d24, {d24}, d16
	vtbl.8		d25, {d25}, d16
	vtbl.8		d26, {d26}, d16
	vtbl.8		d27, {d27}, d16
	vtbl.8		d28, {d28}, d16
	vtbl.8		d29, {d29}, d16
	vld1.32		{q8-q9}, [sp, :256]

	// x10 += x15, x5 = rotl32(x5 ^ x10, 7)
	// x11 += x12, x6 = rotl32(x6 ^ x11, 7)
	// x8 += x13, x7 = rotl32(x7 ^ x8, 7)
	// x9 += x14, x4 = rotl32(x4 ^ x9, 7)
	vadd.i32	q10, q10, q15
	vadd.i32	q11, q11, q12

	vst1.32		{q8-q9}, [sp, :256]
	// x0..7[0-3] are in q0-q7, x10..15[0-3] are in q10-q15.
	// x8..9[0-3] are on the stack.

	// Re-interleave the words in the first two rows of each block (x0..7),
	// as illustrated below. Also add the counter values 0-3 to x12[0-3].
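	//
	// For example, going in with q0 = (x0[0] x0[1] x0[2] x0[3]) and
	// q1 = (x1[0] x1[1] x1[2] x1[3]), vzip.32 q0, q1 leaves
	// q0 = (x0[0] x1[0] x0[1] x1[1]) and q1 = (x0[2] x1[2] x0[3] x1[3]),
	// so each d register then holds two adjacent words of a single block,
	// which is what the (0 1 0 1) style annotations below indicate.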
	vld1.32		{q8}, [r5, :128]	// load counter values 0-3
	vzip.32		q0, q1			// => (0 1 0 1) (0 1 0 1)
	vzip.32		q2, q3			// => (2 3 2 3) (2 3 2 3)
	vzip.32		q4, q5			// => (4 5 4 5) (4 5 4 5)
	vzip.32		q6, q7			// => (6 7 6 7) (6 7 6 7)
	vadd.u32	q12, q8			// x12 += counter values 0-3
	vld1.32		{q8-q9}, [r0]!		// load s0..7

	// Swap q1 and q4 so that we'll free up consecutive registers (q0-q1)
	// after XORing the first 32 bytes.

	// First two rows of each block are (q0 q1) (q2 q6) (q4 q5) (q3 q7)

	// x0..3[0-3] += s0..3[0-3]	(add orig state to 1st row of each block)

	// x4..7[0-3] += s4..7[0-3]	(add orig state to 2nd row of each block)

	// XOR first 32 bytes using keystream from first two rows of first block
	vld1.8		{q8-q9}, [r2]!

	vst1.8		{q8-q9}, [r1]!
	// Re-interleave the words in the last two rows of each block (x8..15).
	vld1.32		{q8-q9}, [sp, :256]
	vzip.32		q12, q13	// => (12 13 12 13) (12 13 12 13)
	vzip.32		q14, q15	// => (14 15 14 15) (14 15 14 15)
	vzip.32		q8, q9		// => (8 9 8 9) (8 9 8 9)
	vzip.32		q10, q11	// => (10 11 10 11) (10 11 10 11)
	vld1.32		{q0-q1}, [r0]	// load s8..15

	// Last two rows of each block are (q8 q12) (q10 q14) (q9 q13) (q11 q15)

	// x8..11[0-3] += s8..11[0-3]	(add orig state to 3rd row of each block)
	vadd.u32	q10, q10, q0
	vadd.u32	q11, q11, q0

	// x12..15[0-3] += s12..15[0-3]	(add orig state to 4th row of each block)
	vadd.u32	q12, q12, q1
	vadd.u32	q14, q14, q1
	vadd.u32	q13, q13, q1
	vadd.u32	q15, q15, q1
	// XOR the rest of the data with the keystream
	vld1.8		{q0-q1}, [r2]!

	vst1.8		{q0-q1}, [r1]!

	vld1.8		{q0-q1}, [r2]!

	vst1.8		{q0-q1}, [r1]!

	vld1.8		{q0-q1}, [r2]!

	vst1.8		{q0-q1}, [r1]!

	vld1.8		{q0-q1}, [r2]!

	vst1.8		{q0-q1}, [r1]!

	vld1.8		{q0-q1}, [r2]!

	vst1.8		{q0-q1}, [r1]!

	vld1.8		{q0-q1}, [r2]!

	vst1.8		{q0-q1}, [r1]!
	mov		sp, r4		// restore original stack pointer
ENDPROC(chacha20_4block_xor_neon)