2 * ChaCha20 256-bit cipher algorithm, RFC7539, ARM NEON functions
4 * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSE3 functions
13 * Copyright (C) 2015 Martin Willi
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
21 #include <linux/linkage.h>
// void chacha20_block_xor_neon(u32 *state, u8 *dst, const u8 *src)
// Encrypts/decrypts exactly one 64-byte ChaCha20 block: generates the
// keystream block from *state and XORs it into the 64 bytes at src,
// writing the result to dst.
// NOTE(review): this listing appears to be an excerpt — many of the
// instructions implementing the quarter-rounds described by the
// comments below are not visible here.
27 ENTRY(chacha20_block_xor_neon)
28 // r0: Input state matrix, s
29 // r1: 1 data block output, o
30 // r2: 1 data block input, i
33 // This function encrypts one ChaCha20 block by loading the state matrix
34 // in four NEON registers. It performs matrix operation on four words in
35 // parallel, but requires shuffling to rearrange the words after each
// Column round: each comment line is one step of the ChaCha
// quarter-round applied to all four columns at once
// (q0..q3 = the four rows x0..x3 of the 4x4 state).
52 // x0 += x1, x3 = rotl32(x3 ^ x0, 16)
58 // x2 += x3, x1 = rotl32(x1 ^ x2, 12)
64 // x0 += x1, x3 = rotl32(x3 ^ x0, 8)
70 // x2 += x3, x1 = rotl32(x1 ^ x2, 7)
// Diagonalize: rotate the words within rows x1..x3 so the next
// round operates on the state's diagonals instead of its columns.
76 // x1 = shuffle32(x1, MASK(0, 3, 2, 1))
78 // x2 = shuffle32(x2, MASK(1, 0, 3, 2))
80 // x3 = shuffle32(x3, MASK(2, 1, 0, 3))
// vext.8 by 12 bytes == rotate the four 32-bit lanes of q3 by
// three word positions, i.e. the MASK(2, 1, 0, 3) shuffle above.
81 vext.8 q3, q3, q3, #12
// Diagonal round: same quarter-round steps on the diagonalized state.
83 // x0 += x1, x3 = rotl32(x3 ^ x0, 16)
89 // x2 += x3, x1 = rotl32(x1 ^ x2, 12)
95 // x0 += x1, x3 = rotl32(x3 ^ x0, 8)
101 // x2 += x3, x1 = rotl32(x1 ^ x2, 7)
// Undo the diagonalization: inverse word rotations restore the
// column layout before the next loop iteration / final addition.
107 // x1 = shuffle32(x1, MASK(2, 1, 0, 3))
108 vext.8 q1, q1, q1, #12
109 // x2 = shuffle32(x2, MASK(1, 0, 3, 2))
110 vext.8 q2, q2, q2, #8
111 // x3 = shuffle32(x3, MASK(0, 3, 2, 1))
112 vext.8 q3, q3, q3, #4
// Finalize (RFC 7539): add the original state s back into the
// permuted state x, then XOR the keystream with the input block.
121 // o0 = i0 ^ (x0 + s0)
125 // o1 = i1 ^ (x1 + s1)
129 // o2 = i2 ^ (x2 + s2)
133 // o3 = i3 ^ (x3 + s3)
142 ENDPROC(chacha20_block_xor_neon)
// void chacha20_4block_xor_neon(u32 *state, u8 *dst, const u8 *src)
// Encrypts/decrypts four consecutive 64-byte ChaCha20 blocks (256 bytes)
// in one call, keeping the whole 4x(4x4) state in NEON registers
// q0..q15 and using a small aligned stack buffer to spill q8-q9.
// NOTE(review): this listing appears to be an excerpt — a large part of
// the instruction stream annotated by the comments below is not visible.
145 ENTRY(chacha20_4block_xor_neon)
// Carve a 32-byte, 32-byte-aligned scratch buffer below the current
// stack pointer; ip keeps the original sp so it can be restored
// (restore code not visible in this excerpt).
147 mov ip, sp // preserve the stack pointer
148 sub r3, sp, #0x20 // allocate a 32 byte buffer
149 bic r3, r3, #0x1f // aligned to 32 bytes
152 // r0: Input state matrix, s
153 // r1: 4 data blocks output, o
154 // r2: 4 data blocks input, i
157 // This function encrypts four consecutive ChaCha20 blocks by loading
158 // the state matrix in NEON registers four times. The algorithm performs
159 // each operation on the corresponding word of each state matrix, hence
160 // requires no word shuffling. For final XORing step we transpose the
161 // matrix by interleaving 32- and then 64-bit words, which allows us to
162 // do XOR in NEON registers.
165 // x0..15[0-3] = s0..3[0..3]
// Load the 64-byte state; q0-q1 from r0, q2-q3 from the r3 buffer
// (presumably r0+32 was copied there — intervening code not visible).
167 vld1.32 {q0-q1}, [r0]
168 vld1.32 {q2-q3}, [r3]
// q11 here transiently holds CTRINC = {0,1,2,3} so each of the four
// parallel blocks gets a distinct counter value.
173 vld1.32 {q11}, [r3, :128]
176 vadd.i32 q12, q12, q11 // x12 += counter values 0-3
// ---- Column rounds: quarter-rounds on corresponding words of all
// four states; each "x += y, z = rotl32(...)" line covers one step.
193 // x0 += x4, x12 = rotl32(x12 ^ x0, 16)
194 // x1 += x5, x13 = rotl32(x13 ^ x1, 16)
195 // x2 += x6, x14 = rotl32(x14 ^ x2, 16)
196 // x3 += x7, x15 = rotl32(x15 ^ x3, 16)
212 // x8 += x12, x4 = rotl32(x4 ^ x8, 12)
213 // x9 += x13, x5 = rotl32(x5 ^ x9, 12)
214 // x10 += x14, x6 = rotl32(x6 ^ x10, 12)
215 // x11 += x15, x7 = rotl32(x7 ^ x11, 12)
218 vadd.i32 q10, q10, q14
219 vadd.i32 q11, q11, q15
// 16 state words need 16 q-registers plus scratch: q8-q9 are spilled
// to the 32-byte aligned stack buffer and reloaded when needed.
221 vst1.32 {q8-q9}, [sp, :256]
237 // x0 += x4, x12 = rotl32(x12 ^ x0, 8)
238 // x1 += x5, x13 = rotl32(x13 ^ x1, 8)
239 // x2 += x6, x14 = rotl32(x14 ^ x2, 8)
240 // x3 += x7, x15 = rotl32(x15 ^ x3, 8)
// vsri #24 inserts the high 24 bits; together with a left shift by 8
// (not visible here) this composes the rotl32(..., 8).
250 vsri.u32 q12, q8, #24
251 vsri.u32 q13, q9, #24
257 vsri.u32 q14, q8, #24
258 vsri.u32 q15, q9, #24
260 vld1.32 {q8-q9}, [sp, :256]
262 // x8 += x12, x4 = rotl32(x4 ^ x8, 7)
263 // x9 += x13, x5 = rotl32(x5 ^ x9, 7)
264 // x10 += x14, x6 = rotl32(x6 ^ x10, 7)
265 // x11 += x15, x7 = rotl32(x7 ^ x11, 7)
268 vadd.i32 q10, q10, q14
269 vadd.i32 q11, q11, q15
271 vst1.32 {q8-q9}, [sp, :256]
287 vld1.32 {q8-q9}, [sp, :256]
// ---- Diagonal rounds: same quarter-round steps, but pairing each
// x-row with the rotated operand pattern (x0/x5/x10/x15 etc.), so no
// explicit word shuffling is needed in the 4-block variant.
289 // x0 += x5, x15 = rotl32(x15 ^ x0, 16)
290 // x1 += x6, x12 = rotl32(x12 ^ x1, 16)
291 // x2 += x7, x13 = rotl32(x13 ^ x2, 16)
292 // x3 += x4, x14 = rotl32(x14 ^ x3, 16)
308 // x10 += x15, x5 = rotl32(x5 ^ x10, 12)
309 // x11 += x12, x6 = rotl32(x6 ^ x11, 12)
310 // x8 += x13, x7 = rotl32(x7 ^ x8, 12)
311 // x9 += x14, x4 = rotl32(x4 ^ x9, 12)
312 vadd.i32 q10, q10, q15
313 vadd.i32 q11, q11, q12
317 vst1.32 {q8-q9}, [sp, :256]
333 // x0 += x5, x15 = rotl32(x15 ^ x0, 8)
334 // x1 += x6, x12 = rotl32(x12 ^ x1, 8)
335 // x2 += x7, x13 = rotl32(x13 ^ x2, 8)
336 // x3 += x4, x14 = rotl32(x14 ^ x3, 8)
346 vsri.u32 q15, q8, #24
347 vsri.u32 q12, q9, #24
353 vsri.u32 q13, q8, #24
354 vsri.u32 q14, q9, #24
356 vld1.32 {q8-q9}, [sp, :256]
358 // x10 += x15, x5 = rotl32(x5 ^ x10, 7)
359 // x11 += x12, x6 = rotl32(x6 ^ x11, 7)
360 // x8 += x13, x7 = rotl32(x7 ^ x8, 7)
361 // x9 += x14, x4 = rotl32(x4 ^ x9, 7)
362 vadd.i32 q10, q10, q15
363 vadd.i32 q11, q11, q12
367 vst1.32 {q8-q9}, [sp, :256]
386 vld1.32 {q8-q9}, [sp, :256]
// Reload the original state words four at a time via r0 for the final
// "x += s" addition (loop body largely not visible in this excerpt).
393 0: ldmia r0!, {r3-r6}
// ---- Transpose phase: regroup the per-word layout back into four
// contiguous 64-byte blocks so the XOR with src can be done 32 bytes
// at a time.
417 // interleave 32-bit words in state n, n+1
423 // interleave 64-bit words in state n, n+2
429 // xor with corresponding input, write to output
430 vld1.8 {q8-q9}, [r2]!
433 vst1.8 {q8-q9}, [r1]!
435 vld1.32 {q8-q9}, [sp, :256]
// Add the saved original state rows back into the permuted state.
448 vadd.i32 q10, q10, q0
449 vadd.i32 q11, q11, q4
459 vadd.i32 q12, q12, q0
// q0 is reloaded from the aligned buffer with CTRINC here, so x12
// (the block counter row) gets both s3[0] and the per-block offsets.
460 vld1.32 {q0}, [r3, :128]
461 vadd.i32 q13, q13, q4
462 vadd.i32 q12, q12, q0 // x12 += counter values 0-3
466 vadd.i32 q14, q14, q0
467 vadd.i32 q15, q15, q4
469 // interleave 32-bit words in state n, n+1
475 // interleave 64-bit words in state n, n+2
// Stream the remaining 6 x 32-byte chunks: load input, XOR with the
// transposed keystream (XOR instructions not visible), store output;
// post-increment addressing walks r2/r1 through all 256 bytes.
483 vld1.8 {q0-q1}, [r2]!
486 vst1.8 {q0-q1}, [r1]!
488 vld1.8 {q0-q1}, [r2]!
491 vst1.8 {q0-q1}, [r1]!
493 vld1.8 {q0-q1}, [r2]!
496 vst1.8 {q0-q1}, [r1]!
498 vld1.8 {q0-q1}, [r2]!
501 vst1.8 {q0-q1}, [r1]!
503 vld1.8 {q0-q1}, [r2]!
506 vst1.8 {q0-q1}, [r1]!
508 vld1.8 {q0-q1}, [r2]!
511 vst1.8 {q0-q1}, [r1]!
520 ENDPROC(chacha20_4block_xor_neon)
523 CTRINC: .word 0, 1, 2, 3