/*
 * Implement AES CTR mode by8 optimization with AVX instructions. (x86_64)
 *
 * This is the AES128/192/256 CTR mode optimization implementation. It
 * requires support for the Intel(R) AES-NI and AVX instructions.
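 *
 * "by8" refers to the interleaving factor: the main loop below processes
 * up to eight 16-byte counter blocks in parallel, producing for block i a
 * keystream block of (roughly) AES_K(counter + i) that is XORed with the
 * corresponding input block. Since CTR mode is symmetric, the same
 * routines serve both encryption and decryption.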
 *
 * This work was inspired by the AES CTR mode optimization published
 * in the Intel Optimized IPSEC Cryptographic library.
 * Additional information on it can be found at:
 * http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 * James Guilford <james.guilford@intel.com>
 * Sean Gulley <sean.m.gulley@intel.com>
 * Chandramouli Narayanan <mouli@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * Neither the name of Intel Corporation nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/linkage.h>

#define VMOVDQ		vmovdqu		/* unaligned load/store of message text */

#define xcounter	%xmm8		/* current counter block (byte-swapped) */
#define xbyteswap	%xmm9		/* shuffle mask for endianness conversion */
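
/*
 * VMOVDQ (unaligned) is used for the input and output text, which carries
 * no alignment guarantee, while the expanded round keys and the constants
 * below are loaded with vmovdqa and are therefore assumed to be 16-byte
 * aligned.
 */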

.align 16
byteswap_const:
	.octa 0x000102030405060708090A0B0C0D0E0F
ddq_low_msk:
	.octa 0x0000000000000000FFFFFFFFFFFFFFFF
ddq_high_add_1:
	.octa 0x00000000000000010000000000000000
ddq_add_1:
	.octa 0x00000000000000000000000000000001
ddq_add_2:
	.octa 0x00000000000000000000000000000002
ddq_add_3:
	.octa 0x00000000000000000000000000000003
ddq_add_4:
	.octa 0x00000000000000000000000000000004
ddq_add_5:
	.octa 0x00000000000000000000000000000005
ddq_add_6:
	.octa 0x00000000000000000000000000000006
ddq_add_7:
	.octa 0x00000000000000000000000000000007
ddq_add_8:
	.octa 0x00000000000000000000000000000008
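
/*
 * The counter is kept in a byte-swapped form so that it can be advanced
 * with plain 64-bit adds. Worked example of the carry case: if the low
 * qword of the counter is 0xFFFFFFFFFFFFFFFF, adding ddq_add_1 wraps it
 * to 0; vptest against ddq_low_msk then reports all-zero low bits, and
 * ddq_high_add_1 is added to carry the increment into the high qword of
 * the 128-bit counter.
 */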

/* generate a unique variable for ddq_add_x */

/* generate a unique variable for xmm register */

/* club the numeric 'id' to the symbol 'name' */

/*
 * do_aes num_in_par load_keys key_len
 * This increments p_in, but not p_out.
 */
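
/*
 * As used by the callers below: 'b' is the number of blocks handled in
 * parallel (1-8), 'k' selects whether the round keys are loaded from
 * p_keys or reused from the xmm registers they were cached in, and
 * 'key_len' is one of KEY_128/KEY_192/KEY_256.
 */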
.macro do_aes b, k, key_len
	.set by, \b
	.set load_keys, \k
	.set klen, \key_len
	vmovdqa	0*16(p_keys), xkey0

	/* lane 0 uses the current counter value directly */
	vpshufb	xbyteswap, xcounter, xdata0
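
	/*
	 * Derive the counter blocks for lanes 1 .. (by - 1): each lane adds
	 * its offset (ddq_add_i) to xcounter, propagates a carry into the
	 * high qword when the low 64 bits wrap, and is byte-swapped back to
	 * big-endian form for encryption.
	 */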
	vpaddq	(ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
	vptest	ddq_low_msk(%rip), var_xdata
	jnz	1f
	vpaddq	ddq_high_add_1(%rip), var_xdata, var_xdata
	vpaddq	ddq_high_add_1(%rip), xcounter, xcounter
	1:
	vpshufb	xbyteswap, var_xdata, var_xdata
	vmovdqa	1*16(p_keys), xkeyA

	vpxor	xkey0, xdata0, xdata0
	vpaddq	(ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
	vptest	ddq_low_msk(%rip), xcounter
	jnz	1f
	vpaddq	ddq_high_add_1(%rip), xcounter, xcounter
	1:

	vpxor	xkey0, var_xdata, var_xdata
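
	/*
	 * At this point every lane has been XORed with round key 0 (the
	 * initial AddRoundKey), and xcounter has been advanced by 'by'
	 * blocks so it already holds the starting counter for the next
	 * do_aes invocation.
	 */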
	vmovdqa	2*16(p_keys), xkeyB

	vaesenc	xkeyA, var_xdata, var_xdata		/* key 1 */

	.if (klen == KEY_128)
	vmovdqa	3*16(p_keys), xkey4
	vmovdqa	3*16(p_keys), xkeyA

	vaesenc	xkeyB, var_xdata, var_xdata		/* key 2 */

	.if (klen == KEY_128)
	vmovdqa	4*16(p_keys), xkeyB
	vmovdqa	4*16(p_keys), xkey4

	.if (klen == KEY_128)
	vaesenc	xkey4, var_xdata, var_xdata
	vaesenc	xkeyA, var_xdata, var_xdata

	vmovdqa	5*16(p_keys), xkeyA

	.if (klen == KEY_128)
	vaesenc	xkeyB, var_xdata, var_xdata
	vaesenc	xkey4, var_xdata, var_xdata

	.if (klen == KEY_128)
	vmovdqa	6*16(p_keys), xkey8
	vmovdqa	6*16(p_keys), xkeyB

	vaesenc	xkeyA, var_xdata, var_xdata		/* key 5 */

	vmovdqa	7*16(p_keys), xkeyA

	.if (klen == KEY_128)
	vaesenc	xkey8, var_xdata, var_xdata
	vaesenc	xkeyB, var_xdata, var_xdata

	.if (klen == KEY_128)
	vmovdqa	8*16(p_keys), xkeyB
	vmovdqa	8*16(p_keys), xkey8

	vaesenc	xkeyA, var_xdata, var_xdata		/* key 7 */

	.if (klen == KEY_128)
	vmovdqa	9*16(p_keys), xkey12
	vmovdqa	9*16(p_keys), xkeyA

	.if (klen == KEY_128)
	vaesenc	xkeyB, var_xdata, var_xdata
	vaesenc	xkey8, var_xdata, var_xdata

	vmovdqa	10*16(p_keys), xkeyB

	.if (klen == KEY_128)
	vaesenc	xkey12, var_xdata, var_xdata
	vaesenc	xkeyA, var_xdata, var_xdata

	.if (klen != KEY_128)
	vmovdqa	11*16(p_keys), xkeyA

	.if (klen == KEY_128)
	vaesenclast	xkeyB, var_xdata, var_xdata
	vaesenc	xkeyB, var_xdata, var_xdata

	.if (klen != KEY_128)
	vmovdqa	12*16(p_keys), xkey12

	vaesenc	xkeyA, var_xdata, var_xdata		/* key 11 */

	.if (klen == KEY_256)
	vmovdqa	13*16(p_keys), xkeyA

	.if (klen == KEY_256)
	vaesenc	xkey12, var_xdata, var_xdata
	vaesenclast	xkey12, var_xdata, var_xdata

	.if (klen == KEY_256)
	vmovdqa	14*16(p_keys), xkeyB

	vaesenc	xkeyA, var_xdata, var_xdata

	vaesenclast	xkeyB, var_xdata, var_xdata
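
	/*
	 * Each lane now holds a finished keystream block E_K(counter_i).
	 * The code below XORs the keystream with the input text and writes
	 * the result to p_out.
	 */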
	add	$(16*by), p_in		/* p_in now points past this chunk */

	VMOVDQ	(i*16 - 16*by)(p_in), xkeyA
	VMOVDQ	(j*16 - 16*by)(p_in), xkeyB

	vpxor	xkeyA, var_xdata, var_xdata
	vpxor	xkeyB, var_xdata, var_xdata

	VMOVDQ	(i*16 - 16*by)(p_in), xkeyA
	vpxor	xkeyA, var_xdata, var_xdata

	VMOVDQ	var_xdata, i*16(p_out)
.endm

.macro do_aes_load val, key_len
	do_aes	\val, 1, \key_len
.endm

.macro do_aes_noload val, key_len
	do_aes	\val, 0, \key_len
.endm
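
/*
 * The _load variant re-reads the round keys from p_keys on every call and
 * handles the 1-7 block head; the _noload variant relies on the four keys
 * (xkey0/xkey4/xkey8/xkey12) that the 8-block main loop keeps cached in
 * xmm registers across iterations.
 */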

/* main body of aes ctr load */
.macro do_aes_ctrmain key_len
	cmp	$16, num_bytes
	jb	.Ldo_return2\key_len

	vmovdqa	byteswap_const(%rip), xbyteswap
	vmovdqu	(p_iv), xcounter
	vpshufb	xbyteswap, xcounter, xcounter

	mov	num_bytes, tmp
	and	$(7*16), tmp
	jz	.Lmult_of_8_blks\key_len
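
	/*
	 * Handle the 1-7 block remainder (num_bytes mod 128) first: one of
	 * the arms below encrypts it and advances p_out past it, num_bytes
	 * is then rounded down to a multiple of 8 blocks, and the bulk is
	 * processed by .Lmain_loop2.
	 */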
	do_aes_load	1, \key_len
	add	$(1*16), p_out
	and	$(~7*16), num_bytes
	jz	.Ldo_return2\key_len
	jmp	.Lmain_loop2\key_len

	do_aes_load	2, \key_len
	add	$(2*16), p_out
	and	$(~7*16), num_bytes
	jz	.Ldo_return2\key_len
	jmp	.Lmain_loop2\key_len

	do_aes_load	3, \key_len
	add	$(3*16), p_out
	and	$(~7*16), num_bytes
	jz	.Ldo_return2\key_len
	jmp	.Lmain_loop2\key_len

	do_aes_load	4, \key_len
	add	$(4*16), p_out
	and	$(~7*16), num_bytes
	jz	.Ldo_return2\key_len
	jmp	.Lmain_loop2\key_len

	do_aes_load	5, \key_len
	add	$(5*16), p_out
	and	$(~7*16), num_bytes
	jz	.Ldo_return2\key_len
	jmp	.Lmain_loop2\key_len

	do_aes_load	6, \key_len
	add	$(6*16), p_out
	and	$(~7*16), num_bytes
	jz	.Ldo_return2\key_len
	jmp	.Lmain_loop2\key_len

	do_aes_load	7, \key_len
	add	$(7*16), p_out
	and	$(~7*16), num_bytes
	jz	.Ldo_return2\key_len
	jmp	.Lmain_loop2\key_len

.Lmult_of_8_blks\key_len:
	.if (\key_len != KEY_128)
		vmovdqa	0*16(p_keys), xkey0
		vmovdqa	4*16(p_keys), xkey4
		vmovdqa	8*16(p_keys), xkey8
		vmovdqa	12*16(p_keys), xkey12
	.else
		vmovdqa	0*16(p_keys), xkey0
		vmovdqa	3*16(p_keys), xkey4
		vmovdqa	6*16(p_keys), xkey8
		vmovdqa	9*16(p_keys), xkey12
	.endif
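
	/*
	 * Four round keys stay cached in xmm registers across main-loop
	 * iterations. The xkey4/xkey8/xkey12 names are literal round numbers
	 * for 192/256-bit keys; for 128-bit keys the same registers hold
	 * rounds 3, 6 and 9, matching the klen == KEY_128 branches in do_aes.
	 */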

.Lmain_loop2\key_len:
	/* num_bytes is a multiple of 8 blocks (128 bytes) and > 0 */
	do_aes_noload	8, \key_len
	add	$(8*16), p_out
	sub	$(8*16), num_bytes
	jne	.Lmain_loop2\key_len

.Ldo_return2\key_len:
	/* return the updated IV */
	vpshufb	xbyteswap, xcounter, xcounter
	vmovdqu	xcounter, (p_iv)
	RET
.endm

/*
 * routine to do AES128 CTR enc/decrypt "by8"
 * XMM registers are clobbered.
 * Saving/restoring must be done at a higher level
 *
 * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out,
 *			unsigned int num_bytes)
 */
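
/*
 * Per the x86_64 SysV calling convention the arguments arrive as
 * in = %rdi, iv = %rsi, keys = %rdx, out = %rcx, num_bytes = %r8; the
 * p_in/p_iv/p_keys/p_out/num_bytes aliases used above are assumed to name
 * exactly these registers. The same entry points serve decryption, since
 * CTR mode only ever XORs a keystream with the text.
 */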
SYM_FUNC_START(aes_ctr_enc_128_avx_by8)
	/* call the aes main loop */
	do_aes_ctrmain KEY_128

SYM_FUNC_END(aes_ctr_enc_128_avx_by8)

/*
 * routine to do AES192 CTR enc/decrypt "by8"
 * XMM registers are clobbered.
 * Saving/restoring must be done at a higher level
 *
 * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out,
 *			unsigned int num_bytes)
 */
SYM_FUNC_START(aes_ctr_enc_192_avx_by8)
	/* call the aes main loop */
	do_aes_ctrmain KEY_192

SYM_FUNC_END(aes_ctr_enc_192_avx_by8)

/*
 * routine to do AES256 CTR enc/decrypt "by8"
 * XMM registers are clobbered.
 * Saving/restoring must be done at a higher level
 *
 * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out,
 *			unsigned int num_bytes)
 */
SYM_FUNC_START(aes_ctr_enc_256_avx_by8)
	/* call the aes main loop */
	do_aes_ctrmain KEY_256

SYM_FUNC_END(aes_ctr_enc_256_avx_by8)