1 /*
2 * ---------------------------------------------------------------------------
3 * Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
5 * LICENSE TERMS
7 * The free distribution and use of this software is allowed (with or without
8 * changes) provided that:
10 * 1. source code distributions include the above copyright notice, this
11 * list of conditions and the following disclaimer;
13 * 2. binary distributions include the above copyright notice, this list
14 * of conditions and the following disclaimer in their documentation;
16 * 3. the name of the copyright holder is not used to endorse products
17 * built using this software without specific written permission.
19 * DISCLAIMER
21 * This software is provided 'as is' with no explicit or implied warranties
22 * in respect of its properties, including, but not limited to, correctness
23 * and/or fitness for purpose.
24 * ---------------------------------------------------------------------------
25 * Issue 20/12/2007
27 * I am grateful to Dag Arne Osvik for many discussions of the techniques that
28 * can be used to optimise AES assembler code on AMD64/EM64T architectures.
29 * Some of the techniques used in this implementation are the result of
30 * suggestions made by him for which I am most grateful.
32 * An AES implementation for AMD64 processors using the YASM assembler. This
33 * implementation provides only encryption and decryption, and hence requires key
34 * scheduling support in C. It uses 8k bytes of tables but its encryption and
35 * decryption performance is very close to that obtained using large tables.
36 * It can use either MS Windows or Gnu/Linux/OpenSolaris OS calling conventions,
37 * which are as follows:
38 *                      ms windows      gnu/linux/opensolaris os
40 *    in_blk                rcx             rdi
41 *    out_blk               rdx             rsi
42 *    context (cx)          r8              rdx
44 *    preserved             rsi             -      + rbx, rbp, rsp, r12, r13, r14 & r15
45 *    registers             rdi             -        on both
47 *    destroyed             -               rsi    + rax, rcx, rdx, r8, r9, r10 & r11
48 *    registers             -               rdi      on both
50 * The convention used here is that for gnu/linux/opensolaris os.
52 * This code provides the standard AES block size (128 bits, 16 bytes) and the
53 * three standard AES key sizes (128, 192 and 256 bits). It has the same call
54 * interface as my C implementation. It uses the Microsoft C AMD64 calling
55 * conventions in which the three parameters are placed in rcx, rdx and r8
56 * respectively. The rbx, rsi, rdi, rbp and r12..r15 registers are preserved.
58 * OpenSolaris Note:
59 * Modified to use GNU/Linux/Solaris calling conventions.
60 * That is, parameters are placed in rdi, rsi, rdx, and rcx, respectively.
62 * AES_RETURN aes_encrypt(const unsigned char in_blk[],
63 * unsigned char out_blk[], const aes_encrypt_ctx cx[1])/
65 * AES_RETURN aes_decrypt(const unsigned char in_blk[],
66 * unsigned char out_blk[], const aes_decrypt_ctx cx[1])/
68 * AES_RETURN aes_encrypt_key<NNN>(const unsigned char key[],
69 * const aes_encrypt_ctx cx[1])/
71 * AES_RETURN aes_decrypt_key<NNN>(const unsigned char key[],
72 * const aes_decrypt_ctx cx[1])/
74 * AES_RETURN aes_encrypt_key(const unsigned char key[],
75 * unsigned int len, const aes_encrypt_ctx cx[1])/
77 * AES_RETURN aes_decrypt_key(const unsigned char key[],
78 * unsigned int len, const aes_decrypt_ctx cx[1])/
80 * where <NNN> is 128, 192 or 256. In the last two calls the length can be in
81 * either bits or bytes.
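 *
 * For illustration only (not part of this file), a caller of the original
 * Gladman interface above would look roughly like this sketch, assuming the
 * declarations are available from Gladman's aes.h header:
 *
 *	unsigned char key[16], pt[16], ct[16];
 *	aes_encrypt_ctx cx[1];
 *	aes_encrypt_key128(key, cx);		(expand the 128-bit key)
 *	aes_encrypt(pt, ct, cx);		(encrypt one 16-byte block)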
83 * Comment in/out the following lines to obtain the desired subroutines. These
84 * selections MUST match those in the C header file aesopt.h
86 #define AES_REV_DKS /* define if key decryption schedule is reversed */
88 #define LAST_ROUND_TABLES /* define for the faster version using extra tables */
91 * The encryption key schedule has the following in-memory layout, where N is the
92 * number of rounds (10, 12 or 14):
94 * lo: | input key (round 0) | / each round is four 32-bit words
95 * | encryption round 1 |
96 * | encryption round 2 |
97 * ....
98 * | encryption round N-1 |
99 * hi: | encryption round N |
101 * The decryption key schedule is normally set up so that it has the same
102 * layout as above by actually reversing the order of the encryption key
103 * schedule in memory (this happens when AES_REV_DKS is set):
105 * lo: | decryption round 0 | = | encryption round N |
106 * | decryption round 1 | = INV_MIX_COL[ | encryption round N-1 | ]
107 * | decryption round 2 | = INV_MIX_COL[ | encryption round N-2 | ]
108 * .... ....
109 * | decryption round N-1 | = INV_MIX_COL[ | encryption round 1 | ]
110 * hi: | decryption round N | = | input key (round 0) |
112 * with all rounds except the first and last modified using inv_mix_column().
113 * But if AES_REV_DKS is NOT set, the order of keys is left as it is for
114 * encryption, so the schedule has to be accessed in reverse when used for
115 * decryption (although the inverse mix column modifications are still done):
117 * lo: | decryption round 0 | = | input key (round 0) |
118 * | decryption round 1 | = INV_MIX_COL[ | encryption round 1 | ]
119 * | decryption round 2 | = INV_MIX_COL[ | encryption round 2 | ]
120 * .... ....
121 * | decryption round N-1 | = INV_MIX_COL[ | encryption round N-1 | ]
122 * hi: | decryption round N | = | encryption round N |
124 * This layout is faster when the assembler key scheduling provided here
125 * is used.
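 *
 * In C terms (sketch only), locating decryption round key r (0 <= r <= N)
 * in a flat 32-bit schedule ks[] under the two layouts amounts to:
 *
 *	rk = &ks[4 * r];		with AES_REV_DKS (schedule stored reversed)
 *	rk = &ks[4 * (N - r)];		without AES_REV_DKS (accessed in reverse)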
127 * End of user defines
131 * ---------------------------------------------------------------------------
132 * OpenSolaris OS modifications
134 * This source originates from Brian Gladman's file aes_amd64.asm
135 * in http://fp.gladman.plus.com/AES/aes-src-04-03-08.zip
136 * with these changes:
138 * 1. Removed MS Windows-specific code within DLL_EXPORT, _SEH_, and
139 * !__GNUC__ ifdefs. Also removed ENCRYPTION, DECRYPTION,
140 * AES_128, AES_192, AES_256, AES_VAR ifdefs.
142 * 2. Translated yasm/nasm %define and .macro definitions to cpp(1) #define
144 * 3. Translated yasm/nasm %ifdef/%ifndef to cpp(1) #ifdef
146 * 4. Translated Intel/yasm/nasm syntax to ATT/OpenSolaris as(1) syntax
147 * (operands reversed, literals prefixed with "$", registers prefixed with "%",
148 * "[register+offset]" addressing changed to "offset(register)",
149 * parentheses in constant expressions "()" changed to square brackets "[]",
150 * "." removed from local (numeric) labels, and other changes).
151 * Examples:
152 *    Intel/yasm/nasm Syntax            ATT/OpenSolaris Syntax
153 *    mov  rax,(4*20h)                  mov  $[4*0x20],%rax
154 *    mov  rax,[ebx+20h]                mov  0x20(%ebx),%rax
155 *    lea  rax,[ebx+ecx]                lea  (%ebx,%ecx),%rax
156 *    sub  rax,[ebx+ecx*4-20h]          sub  -0x20(%ebx,%ecx,4),%rax
158 * 5. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
159 * /usr/include/sys/asm_linkage.h, lint(1B) guards, and dummy C function
160 * definitions for lint.
162 * 6. Renamed functions and reordered parameters to match OpenSolaris:
163 * Original Gladman interface:
164 * int aes_encrypt(const unsigned char *in,
165 * unsigned char *out, const aes_encrypt_ctx cx[1])/
166 * int aes_decrypt(const unsigned char *in,
167 * unsigned char *out, const aes_encrypt_ctx cx[1])/
168 * Note: aes_encrypt_ctx contains ks, a 60-element array of uint32_t,
169 * and a union type, inf, containing inf.l, a uint32_t, and
170 * inf.b, a 4-element array of uint32_t. Only b[0] in the array (aka "l") is
171 * used and contains the key schedule length * 16, where the key schedule
172 * length is 10, 12, or 14 (the number of rounds).
174 * OpenSolaris OS interface:
175 * void aes_encrypt_amd64(const aes_ks_t *ks, int Nr,
176 * const uint32_t pt[4], uint32_t ct[4])/
177 * void aes_decrypt_amd64(const aes_ks_t *ks, int Nr,
178 * const uint32_t pt[4], uint32_t ct[4])/
179 * typedef union {uint64_t ks64[(MAX_AES_NR + 1) * 4]/
180 * uint32_t ks32[(MAX_AES_NR + 1) * 4]/ } aes_ks_t/
181 * Note: ks is the AES key schedule, Nr is the number of rounds, pt is the
182 * plaintext, ct is the ciphertext, and MAX_AES_NR is 14.
183 * For the x86 64-bit architecture, OpenSolaris OS uses ks32 instead of ks64.
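 *
 * For illustration only, a call through this OpenSolaris interface would look
 * roughly like the sketch below; the encryption key schedule must already
 * have been produced by the separate C key-expansion code, and the schedule
 * passed to aes_decrypt_amd64 is the decryption schedule prepared likewise:
 *
 *	aes_ks_t ks;
 *	uint32_t pt[4], ct[4];
 *	int Nr = 10;			(10, 12, or 14 rounds)
 *	(fill ks.ks32 with the expanded encryption key here)
 *	aes_encrypt_amd64(&ks, Nr, pt, ct);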
187 #include <sys/asm_linkage.h>
189 #define KS_LENGTH 60
191 #define raxd eax
192 #define rdxd edx
193 #define rcxd ecx
194 #define rbxd ebx
195 #define rsid esi
196 #define rdid edi
198 #define raxb al
199 #define rdxb dl
200 #define rcxb cl
201 #define rbxb bl
202 #define rsib sil
203 #define rdib dil
205 / finite field multiplies by {02}, {04} and {08}
207 #define f2(x) [[x<<1]^[[[x>>7]&1]*0x11b]]
208 #define f4(x) [[x<<2]^[[[x>>6]&1]*0x11b]^[[[x>>6]&2]*0x11b]]
209 #define f8(x) [[x<<3]^[[[x>>5]&1]*0x11b]^[[[x>>5]&2]*0x11b]^[[[x>>5]&4]*0x11b]]
211 / finite field multiplies required in table generation
213 #define f3(x) [[f2(x)] ^ [x]]
214 #define f9(x) [[f8(x)] ^ [x]]
215 #define fb(x) [[f8(x)] ^ [f2(x)] ^ [x]]
216 #define fd(x) [[f8(x)] ^ [f4(x)] ^ [x]]
217 #define fe(x) [[f8(x)] ^ [f4(x)] ^ [f2(x)]]
219 / macros for expanding S-box data
221 #define u8(x) [f2(x)], [x], [x], [f3(x)], [f2(x)], [x], [x], [f3(x)]
222 #define v8(x) [fe(x)], [f9(x)], [fd(x)], [fb(x)], [fe(x)], [f9(x)], [fd(x)], [x]
223 #define w8(x) [x], 0, 0, 0, [x], 0, 0, 0
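/ For reference (illustration only, not used by the build): f2() above is the
/ usual GF(2^8) "xtime" operation with the AES polynomial 0x11b, evaluated by
/ the preprocessor on byte constants, while u8() and v8() expand one (inverse)
/ S-box output into an 8-byte table entry.  In C the same arithmetic would be:
/
/	unsigned f2(unsigned x) { return (x << 1) ^ (((x >> 7) & 1) * 0x11b); }
/	unsigned f3(unsigned x) { return f2(x) ^ x; }
/
/ e.g. f2(0x63) == 0xc6 and f3(0x63) == 0xa5, giving entry bytes
/ c6 63 63 a5 c6 63 63 a5 for S-box output 0x63 (the first entry of enc_vals).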
225 #define enc_vals(x) \
226 .byte x(0x63),x(0x7c),x(0x77),x(0x7b),x(0xf2),x(0x6b),x(0x6f),x(0xc5); \
227 .byte x(0x30),x(0x01),x(0x67),x(0x2b),x(0xfe),x(0xd7),x(0xab),x(0x76); \
228 .byte x(0xca),x(0x82),x(0xc9),x(0x7d),x(0xfa),x(0x59),x(0x47),x(0xf0); \
229 .byte x(0xad),x(0xd4),x(0xa2),x(0xaf),x(0x9c),x(0xa4),x(0x72),x(0xc0); \
230 .byte x(0xb7),x(0xfd),x(0x93),x(0x26),x(0x36),x(0x3f),x(0xf7),x(0xcc); \
231 .byte x(0x34),x(0xa5),x(0xe5),x(0xf1),x(0x71),x(0xd8),x(0x31),x(0x15); \
232 .byte x(0x04),x(0xc7),x(0x23),x(0xc3),x(0x18),x(0x96),x(0x05),x(0x9a); \
233 .byte x(0x07),x(0x12),x(0x80),x(0xe2),x(0xeb),x(0x27),x(0xb2),x(0x75); \
234 .byte x(0x09),x(0x83),x(0x2c),x(0x1a),x(0x1b),x(0x6e),x(0x5a),x(0xa0); \
235 .byte x(0x52),x(0x3b),x(0xd6),x(0xb3),x(0x29),x(0xe3),x(0x2f),x(0x84); \
236 .byte x(0x53),x(0xd1),x(0x00),x(0xed),x(0x20),x(0xfc),x(0xb1),x(0x5b); \
237 .byte x(0x6a),x(0xcb),x(0xbe),x(0x39),x(0x4a),x(0x4c),x(0x58),x(0xcf); \
238 .byte x(0xd0),x(0xef),x(0xaa),x(0xfb),x(0x43),x(0x4d),x(0x33),x(0x85); \
239 .byte x(0x45),x(0xf9),x(0x02),x(0x7f),x(0x50),x(0x3c),x(0x9f),x(0xa8); \
240 .byte x(0x51),x(0xa3),x(0x40),x(0x8f),x(0x92),x(0x9d),x(0x38),x(0xf5); \
241 .byte x(0xbc),x(0xb6),x(0xda),x(0x21),x(0x10),x(0xff),x(0xf3),x(0xd2); \
242 .byte x(0xcd),x(0x0c),x(0x13),x(0xec),x(0x5f),x(0x97),x(0x44),x(0x17); \
243 .byte x(0xc4),x(0xa7),x(0x7e),x(0x3d),x(0x64),x(0x5d),x(0x19),x(0x73); \
244 .byte x(0x60),x(0x81),x(0x4f),x(0xdc),x(0x22),x(0x2a),x(0x90),x(0x88); \
245 .byte x(0x46),x(0xee),x(0xb8),x(0x14),x(0xde),x(0x5e),x(0x0b),x(0xdb); \
246 .byte x(0xe0),x(0x32),x(0x3a),x(0x0a),x(0x49),x(0x06),x(0x24),x(0x5c); \
247 .byte x(0xc2),x(0xd3),x(0xac),x(0x62),x(0x91),x(0x95),x(0xe4),x(0x79); \
248 .byte x(0xe7),x(0xc8),x(0x37),x(0x6d),x(0x8d),x(0xd5),x(0x4e),x(0xa9); \
249 .byte x(0x6c),x(0x56),x(0xf4),x(0xea),x(0x65),x(0x7a),x(0xae),x(0x08); \
250 .byte x(0xba),x(0x78),x(0x25),x(0x2e),x(0x1c),x(0xa6),x(0xb4),x(0xc6); \
251 .byte x(0xe8),x(0xdd),x(0x74),x(0x1f),x(0x4b),x(0xbd),x(0x8b),x(0x8a); \
252 .byte x(0x70),x(0x3e),x(0xb5),x(0x66),x(0x48),x(0x03),x(0xf6),x(0x0e); \
253 .byte x(0x61),x(0x35),x(0x57),x(0xb9),x(0x86),x(0xc1),x(0x1d),x(0x9e); \
254 .byte x(0xe1),x(0xf8),x(0x98),x(0x11),x(0x69),x(0xd9),x(0x8e),x(0x94); \
255 .byte x(0x9b),x(0x1e),x(0x87),x(0xe9),x(0xce),x(0x55),x(0x28),x(0xdf); \
256 .byte x(0x8c),x(0xa1),x(0x89),x(0x0d),x(0xbf),x(0xe6),x(0x42),x(0x68); \
257 .byte x(0x41),x(0x99),x(0x2d),x(0x0f),x(0xb0),x(0x54),x(0xbb),x(0x16)
259 #define dec_vals(x) \
260 .byte x(0x52),x(0x09),x(0x6a),x(0xd5),x(0x30),x(0x36),x(0xa5),x(0x38); \
261 .byte x(0xbf),x(0x40),x(0xa3),x(0x9e),x(0x81),x(0xf3),x(0xd7),x(0xfb); \
262 .byte x(0x7c),x(0xe3),x(0x39),x(0x82),x(0x9b),x(0x2f),x(0xff),x(0x87); \
263 .byte x(0x34),x(0x8e),x(0x43),x(0x44),x(0xc4),x(0xde),x(0xe9),x(0xcb); \
264 .byte x(0x54),x(0x7b),x(0x94),x(0x32),x(0xa6),x(0xc2),x(0x23),x(0x3d); \
265 .byte x(0xee),x(0x4c),x(0x95),x(0x0b),x(0x42),x(0xfa),x(0xc3),x(0x4e); \
266 .byte x(0x08),x(0x2e),x(0xa1),x(0x66),x(0x28),x(0xd9),x(0x24),x(0xb2); \
267 .byte x(0x76),x(0x5b),x(0xa2),x(0x49),x(0x6d),x(0x8b),x(0xd1),x(0x25); \
268 .byte x(0x72),x(0xf8),x(0xf6),x(0x64),x(0x86),x(0x68),x(0x98),x(0x16); \
269 .byte x(0xd4),x(0xa4),x(0x5c),x(0xcc),x(0x5d),x(0x65),x(0xb6),x(0x92); \
270 .byte x(0x6c),x(0x70),x(0x48),x(0x50),x(0xfd),x(0xed),x(0xb9),x(0xda); \
271 .byte x(0x5e),x(0x15),x(0x46),x(0x57),x(0xa7),x(0x8d),x(0x9d),x(0x84); \
272 .byte x(0x90),x(0xd8),x(0xab),x(0x00),x(0x8c),x(0xbc),x(0xd3),x(0x0a); \
273 .byte x(0xf7),x(0xe4),x(0x58),x(0x05),x(0xb8),x(0xb3),x(0x45),x(0x06); \
274 .byte x(0xd0),x(0x2c),x(0x1e),x(0x8f),x(0xca),x(0x3f),x(0x0f),x(0x02); \
275 .byte x(0xc1),x(0xaf),x(0xbd),x(0x03),x(0x01),x(0x13),x(0x8a),x(0x6b); \
276 .byte x(0x3a),x(0x91),x(0x11),x(0x41),x(0x4f),x(0x67),x(0xdc),x(0xea); \
277 .byte x(0x97),x(0xf2),x(0xcf),x(0xce),x(0xf0),x(0xb4),x(0xe6),x(0x73); \
278 .byte x(0x96),x(0xac),x(0x74),x(0x22),x(0xe7),x(0xad),x(0x35),x(0x85); \
279 .byte x(0xe2),x(0xf9),x(0x37),x(0xe8),x(0x1c),x(0x75),x(0xdf),x(0x6e); \
280 .byte x(0x47),x(0xf1),x(0x1a),x(0x71),x(0x1d),x(0x29),x(0xc5),x(0x89); \
281 .byte x(0x6f),x(0xb7),x(0x62),x(0x0e),x(0xaa),x(0x18),x(0xbe),x(0x1b); \
282 .byte x(0xfc),x(0x56),x(0x3e),x(0x4b),x(0xc6),x(0xd2),x(0x79),x(0x20); \
283 .byte x(0x9a),x(0xdb),x(0xc0),x(0xfe),x(0x78),x(0xcd),x(0x5a),x(0xf4); \
284 .byte x(0x1f),x(0xdd),x(0xa8),x(0x33),x(0x88),x(0x07),x(0xc7),x(0x31); \
285 .byte x(0xb1),x(0x12),x(0x10),x(0x59),x(0x27),x(0x80),x(0xec),x(0x5f); \
286 .byte x(0x60),x(0x51),x(0x7f),x(0xa9),x(0x19),x(0xb5),x(0x4a),x(0x0d); \
287 .byte x(0x2d),x(0xe5),x(0x7a),x(0x9f),x(0x93),x(0xc9),x(0x9c),x(0xef); \
288 .byte x(0xa0),x(0xe0),x(0x3b),x(0x4d),x(0xae),x(0x2a),x(0xf5),x(0xb0); \
289 .byte x(0xc8),x(0xeb),x(0xbb),x(0x3c),x(0x83),x(0x53),x(0x99),x(0x61); \
290 .byte x(0x17),x(0x2b),x(0x04),x(0x7e),x(0xba),x(0x77),x(0xd6),x(0x26); \
291 .byte x(0xe1),x(0x69),x(0x14),x(0x63),x(0x55),x(0x21),x(0x0c),x(0x7d)
293 #define tptr %rbp /* table pointer */
294 #define kptr %r8 /* key schedule pointer */
295 #define fofs 128 /* adjust offset in key schedule to keep |disp| < 128 */
296 #define fk_ref(x, y) -16*x+fofs+4*y(kptr)
298 #ifdef AES_REV_DKS
299 #define rofs 128
300 #define ik_ref(x, y) -16*x+rofs+4*y(kptr)
302 #else
303 #define rofs -128
304 #define ik_ref(x, y) 16*x+rofs+4*y(kptr)
305 #endif /* AES_REV_DKS */
307 #define tab_0(x) (tptr,x,8)
308 #define tab_1(x) 3(tptr,x,8)
309 #define tab_2(x) 2(tptr,x,8)
310 #define tab_3(x) 1(tptr,x,8)
311 #define tab_f(x) 1(tptr,x,8)
312 #define tab_i(x) 7(tptr,x,8)
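/ Note on the table addressing above (descriptive only): each 8-byte entry of
/ enc_tab/dec_tab holds the 4-byte round-table column twice, so tab_0..tab_3
/ read the four byte-rotations of that column purely through the displacements
/ 0, 3, 2 and 1 in the (tptr,index,8) address, with no explicit rotates.
/ tab_f and tab_i fetch a single byte: offset 1 of a u8() entry is the plain
/ S-box value and offset 7 of a v8() entry is the plain inverse S-box value,
/ which is what the non-LAST_ROUND_TABLES final rounds use.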
314 #define ff_rnd(p1, p2, p3, p4, round) /* normal forward round */ \
315 mov fk_ref(round,0), p1; \
316 mov fk_ref(round,1), p2; \
317 mov fk_ref(round,2), p3; \
318 mov fk_ref(round,3), p4; \
320 movzx %al, %esi; \
321 movzx %ah, %edi; \
322 shr $16, %eax; \
323 xor tab_0(%rsi), p1; \
324 xor tab_1(%rdi), p4; \
325 movzx %al, %esi; \
326 movzx %ah, %edi; \
327 xor tab_2(%rsi), p3; \
328 xor tab_3(%rdi), p2; \
330 movzx %bl, %esi; \
331 movzx %bh, %edi; \
332 shr $16, %ebx; \
333 xor tab_0(%rsi), p2; \
334 xor tab_1(%rdi), p1; \
335 movzx %bl, %esi; \
336 movzx %bh, %edi; \
337 xor tab_2(%rsi), p4; \
338 xor tab_3(%rdi), p3; \
340 movzx %cl, %esi; \
341 movzx %ch, %edi; \
342 shr $16, %ecx; \
343 xor tab_0(%rsi), p3; \
344 xor tab_1(%rdi), p2; \
345 movzx %cl, %esi; \
346 movzx %ch, %edi; \
347 xor tab_2(%rsi), p1; \
348 xor tab_3(%rdi), p4; \
350 movzx %dl, %esi; \
351 movzx %dh, %edi; \
352 shr $16, %edx; \
353 xor tab_0(%rsi), p4; \
354 xor tab_1(%rdi), p3; \
355 movzx %dl, %esi; \
356 movzx %dh, %edi; \
357 xor tab_2(%rsi), p2; \
358 xor tab_3(%rdi), p1; \
360 mov p1, %eax; \
361 mov p2, %ebx; \
362 mov p3, %ecx; \
363 mov p4, %edx
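/ In C terms (descriptive only), one ff_rnd round computes, for each output
/ column j, with round key words rk[0..3] and the rotated tables T0..T3 read
/ through tab_0..tab_3:
/
/	out[j] = rk[j] ^ T0[byte0(s[j])]   ^ T1[byte1(s[j+1])]
/			^ T2[byte2(s[j+2])] ^ T3[byte3(s[j+3])]	(indices mod 4)
/
/ and then copies the result back into eax..edx for the next round.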
365 #ifdef LAST_ROUND_TABLES
367 #define fl_rnd(p1, p2, p3, p4, round) /* last forward round */ \
368 add $2048, tptr; \
369 mov fk_ref(round,0), p1; \
370 mov fk_ref(round,1), p2; \
371 mov fk_ref(round,2), p3; \
372 mov fk_ref(round,3), p4; \
374 movzx %al, %esi; \
375 movzx %ah, %edi; \
376 shr $16, %eax; \
377 xor tab_0(%rsi), p1; \
378 xor tab_1(%rdi), p4; \
379 movzx %al, %esi; \
380 movzx %ah, %edi; \
381 xor tab_2(%rsi), p3; \
382 xor tab_3(%rdi), p2; \
384 movzx %bl, %esi; \
385 movzx %bh, %edi; \
386 shr $16, %ebx; \
387 xor tab_0(%rsi), p2; \
388 xor tab_1(%rdi), p1; \
389 movzx %bl, %esi; \
390 movzx %bh, %edi; \
391 xor tab_2(%rsi), p4; \
392 xor tab_3(%rdi), p3; \
394 movzx %cl, %esi; \
395 movzx %ch, %edi; \
396 shr $16, %ecx; \
397 xor tab_0(%rsi), p3; \
398 xor tab_1(%rdi), p2; \
399 movzx %cl, %esi; \
400 movzx %ch, %edi; \
401 xor tab_2(%rsi), p1; \
402 xor tab_3(%rdi), p4; \
404 movzx %dl, %esi; \
405 movzx %dh, %edi; \
406 shr $16, %edx; \
407 xor tab_0(%rsi), p4; \
408 xor tab_1(%rdi), p3; \
409 movzx %dl, %esi; \
410 movzx %dh, %edi; \
411 xor tab_2(%rsi), p2; \
412 xor tab_3(%rdi), p1
414 #else
416 #define fl_rnd(p1, p2, p3, p4, round) /* last forward round */ \
417 mov fk_ref(round,0), p1; \
418 mov fk_ref(round,1), p2; \
419 mov fk_ref(round,2), p3; \
420 mov fk_ref(round,3), p4; \
422 movzx %al, %esi; \
423 movzx %ah, %edi; \
424 shr $16, %eax; \
425 movzx tab_f(%rsi), %esi; \
426 movzx tab_f(%rdi), %edi; \
427 xor %esi, p1; \
428 rol $8, %edi; \
429 xor %edi, p4; \
430 movzx %al, %esi; \
431 movzx %ah, %edi; \
432 movzx tab_f(%rsi), %esi; \
433 movzx tab_f(%rdi), %edi; \
434 rol $16, %esi; \
435 rol $24, %edi; \
436 xor %esi, p3; \
437 xor %edi, p2; \
439 movzx %bl, %esi; \
440 movzx %bh, %edi; \
441 shr $16, %ebx; \
442 movzx tab_f(%rsi), %esi; \
443 movzx tab_f(%rdi), %edi; \
444 xor %esi, p2; \
445 rol $8, %edi; \
446 xor %edi, p1; \
447 movzx %bl, %esi; \
448 movzx %bh, %edi; \
449 movzx tab_f(%rsi), %esi; \
450 movzx tab_f(%rdi), %edi; \
451 rol $16, %esi; \
452 rol $24, %edi; \
453 xor %esi, p4; \
454 xor %edi, p3; \
456 movzx %cl, %esi; \
457 movzx %ch, %edi; \
458 movzx tab_f(%rsi), %esi; \
459 movzx tab_f(%rdi), %edi; \
460 shr $16, %ecx; \
461 xor %esi, p3; \
462 rol $8, %edi; \
463 xor %edi, p2; \
464 movzx %cl, %esi; \
465 movzx %ch, %edi; \
466 movzx tab_f(%rsi), %esi; \
467 movzx tab_f(%rdi), %edi; \
468 rol $16, %esi; \
469 rol $24, %edi; \
470 xor %esi, p1; \
471 xor %edi, p4; \
473 movzx %dl, %esi; \
474 movzx %dh, %edi; \
475 movzx tab_f(%rsi), %esi; \
476 movzx tab_f(%rdi), %edi; \
477 shr $16, %edx; \
478 xor %esi, p4; \
479 rol $8, %edi; \
480 xor %edi, p3; \
481 movzx %dl, %esi; \
482 movzx %dh, %edi; \
483 movzx tab_f(%rsi), %esi; \
484 movzx tab_f(%rdi), %edi; \
485 rol $16, %esi; \
486 rol $24, %edi; \
487 xor %esi, p2; \
488 xor %edi, p1
490 #endif /* LAST_ROUND_TABLES */
492 #define ii_rnd(p1, p2, p3, p4, round) /* normal inverse round */ \
493 mov ik_ref(round,0), p1; \
494 mov ik_ref(round,1), p2; \
495 mov ik_ref(round,2), p3; \
496 mov ik_ref(round,3), p4; \
498 movzx %al, %esi; \
499 movzx %ah, %edi; \
500 shr $16, %eax; \
501 xor tab_0(%rsi), p1; \
502 xor tab_1(%rdi), p2; \
503 movzx %al, %esi; \
504 movzx %ah, %edi; \
505 xor tab_2(%rsi), p3; \
506 xor tab_3(%rdi), p4; \
508 movzx %bl, %esi; \
509 movzx %bh, %edi; \
510 shr $16, %ebx; \
511 xor tab_0(%rsi), p2; \
512 xor tab_1(%rdi), p3; \
513 movzx %bl, %esi; \
514 movzx %bh, %edi; \
515 xor tab_2(%rsi), p4; \
516 xor tab_3(%rdi), p1; \
518 movzx %cl, %esi; \
519 movzx %ch, %edi; \
520 shr $16, %ecx; \
521 xor tab_0(%rsi), p3; \
522 xor tab_1(%rdi), p4; \
523 movzx %cl, %esi; \
524 movzx %ch, %edi; \
525 xor tab_2(%rsi), p1; \
526 xor tab_3(%rdi), p2; \
528 movzx %dl, %esi; \
529 movzx %dh, %edi; \
530 shr $16, %edx; \
531 xor tab_0(%rsi), p4; \
532 xor tab_1(%rdi), p1; \
533 movzx %dl, %esi; \
534 movzx %dh, %edi; \
535 xor tab_2(%rsi), p2; \
536 xor tab_3(%rdi), p3; \
538 mov p1, %eax; \
539 mov p2, %ebx; \
540 mov p3, %ecx; \
541 mov p4, %edx
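/ ii_rnd mirrors ff_rnd for decryption: with the inverse tables IT0..IT3 it
/ computes (descriptive only)
/
/	out[j] = rk[j] ^ IT0[byte0(s[j])]   ^ IT1[byte1(s[j-1])]
/			^ IT2[byte2(s[j-2])] ^ IT3[byte3(s[j-3])]	(indices mod 4)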
543 #ifdef LAST_ROUND_TABLES
545 #define il_rnd(p1, p2, p3, p4, round) /* last inverse round */ \
546 add $2048, tptr; \
547 mov ik_ref(round,0), p1; \
548 mov ik_ref(round,1), p2; \
549 mov ik_ref(round,2), p3; \
550 mov ik_ref(round,3), p4; \
552 movzx %al, %esi; \
553 movzx %ah, %edi; \
554 shr $16, %eax; \
555 xor tab_0(%rsi), p1; \
556 xor tab_1(%rdi), p2; \
557 movzx %al, %esi; \
558 movzx %ah, %edi; \
559 xor tab_2(%rsi), p3; \
560 xor tab_3(%rdi), p4; \
562 movzx %bl, %esi; \
563 movzx %bh, %edi; \
564 shr $16, %ebx; \
565 xor tab_0(%rsi), p2; \
566 xor tab_1(%rdi), p3; \
567 movzx %bl, %esi; \
568 movzx %bh, %edi; \
569 xor tab_2(%rsi), p4; \
570 xor tab_3(%rdi), p1; \
572 movzx %cl, %esi; \
573 movzx %ch, %edi; \
574 shr $16, %ecx; \
575 xor tab_0(%rsi), p3; \
576 xor tab_1(%rdi), p4; \
577 movzx %cl, %esi; \
578 movzx %ch, %edi; \
579 xor tab_2(%rsi), p1; \
580 xor tab_3(%rdi), p2; \
582 movzx %dl, %esi; \
583 movzx %dh, %edi; \
584 shr $16, %edx; \
585 xor tab_0(%rsi), p4; \
586 xor tab_1(%rdi), p1; \
587 movzx %dl, %esi; \
588 movzx %dh, %edi; \
589 xor tab_2(%rsi), p2; \
590 xor tab_3(%rdi), p3
592 #else
594 #define il_rnd(p1, p2, p3, p4, round) /* last inverse round */ \
595 mov ik_ref(round,0), p1; \
596 mov ik_ref(round,1), p2; \
597 mov ik_ref(round,2), p3; \
598 mov ik_ref(round,3), p4; \
600 movzx %al, %esi; \
601 movzx %ah, %edi; \
602 movzx tab_i(%rsi), %esi; \
603 movzx tab_i(%rdi), %edi; \
604 shr $16, %eax; \
605 xor %esi, p1; \
606 rol $8, %edi; \
607 xor %edi, p2; \
608 movzx %al, %esi; \
609 movzx %ah, %edi; \
610 movzx tab_i(%rsi), %esi; \
611 movzx tab_i(%rdi), %edi; \
612 rol $16, %esi; \
613 rol $24, %edi; \
614 xor %esi, p3; \
615 xor %edi, p4; \
617 movzx %bl, %esi; \
618 movzx %bh, %edi; \
619 movzx tab_i(%rsi), %esi; \
620 movzx tab_i(%rdi), %edi; \
621 shr $16, %ebx; \
622 xor %esi, p2; \
623 rol $8, %edi; \
624 xor %edi, p3; \
625 movzx %bl, %esi; \
626 movzx %bh, %edi; \
627 movzx tab_i(%rsi), %esi; \
628 movzx tab_i(%rdi), %edi; \
629 rol $16, %esi; \
630 rol $24, %edi; \
631 xor %esi, p4; \
632 xor %edi, p1; \
634 movzx %cl, %esi; \
635 movzx %ch, %edi; \
636 movzx tab_i(%rsi), %esi; \
637 movzx tab_i(%rdi), %edi; \
638 shr $16, %ecx; \
639 xor %esi, p3; \
640 rol $8, %edi; \
641 xor %edi, p4; \
642 movzx %cl, %esi; \
643 movzx %ch, %edi; \
644 movzx tab_i(%rsi), %esi; \
645 movzx tab_i(%rdi), %edi; \
646 rol $16, %esi; \
647 rol $24, %edi; \
648 xor %esi, p1; \
649 xor %edi, p2; \
651 movzx %dl, %esi; \
652 movzx %dh, %edi; \
653 movzx tab_i(%rsi), %esi; \
654 movzx tab_i(%rdi), %edi; \
655 shr $16, %edx; \
656 xor %esi, p4; \
657 rol $8, %edi; \
658 xor %edi, p1; \
659 movzx %dl, %esi; \
660 movzx %dh, %edi; \
661 movzx tab_i(%rsi), %esi; \
662 movzx tab_i(%rdi), %edi; \
663 rol $16, %esi; \
664 rol $24, %edi; \
665 xor %esi, p2; \
666 xor %edi, p3
668 #endif /* LAST_ROUND_TABLES */
671 * OpenSolaris OS:
672 * void aes_encrypt_amd64(const aes_ks_t *ks, int Nr,
673 * const uint32_t pt[4], uint32_t ct[4])/
675 * Original interface:
676 * int aes_encrypt(const unsigned char *in,
677 * unsigned char *out, const aes_encrypt_ctx cx[1])/
679 .align 64
680 enc_tab:
681 enc_vals(u8)
682 #ifdef LAST_ROUND_TABLES
683 / Last Round Tables:
684 enc_vals(w8)
685 #endif
688 ENTRY_NP(aes_encrypt_amd64)
689 #ifdef GLADMAN_INTERFACE
690 / Original interface
691 sub $[4*8], %rsp / gnu/linux/opensolaris binary interface
692 mov %rsi, (%rsp) / output pointer (P2)
693 mov %rdx, %r8 / context (P3)
695 mov %rbx, 1*8(%rsp) / P1: input pointer in rdi
696 mov %rbp, 2*8(%rsp) / P2: output pointer in (rsp)
697 mov %r12, 3*8(%rsp) / P3: context in r8
698 movzx 4*KS_LENGTH(kptr), %esi / Get byte key length * 16
700 #else
701 / OpenSolaris OS interface
702 sub $[4*8], %rsp / Make room on stack to save registers
703 mov %rcx, (%rsp) / Save output pointer (P4) on stack
704 mov %rdi, %r8 / context (P1)
705 mov %rdx, %rdi / P3: save input pointer
706 shl $4, %esi / P2: esi byte key length * 16
708 mov %rbx, 1*8(%rsp) / Save registers
709 mov %rbp, 2*8(%rsp)
710 mov %r12, 3*8(%rsp)
711 / P1: context in r8
712 / P2: byte key length * 16 in esi
713 / P3: input pointer in rdi
714 / P4: output pointer in (rsp)
715 #endif /* GLADMAN_INTERFACE */
717 lea enc_tab(%rip), tptr
718 sub $fofs, kptr
720 / Load input block into registers
721 mov (%rdi), %eax
722 mov 1*4(%rdi), %ebx
723 mov 2*4(%rdi), %ecx
724 mov 3*4(%rdi), %edx
726 xor fofs(kptr), %eax
727 xor fofs+4(kptr), %ebx
728 xor fofs+8(kptr), %ecx
729 xor fofs+12(kptr), %edx
731 lea (kptr,%rsi), kptr
732 / Jump based on byte key length * 16:
733 cmp $[10*16], %esi
734 je 3f
735 cmp $[12*16], %esi
736 je 2f
737 cmp $[14*16], %esi
738 je 1f
739 mov $-1, %rax / error
740 jmp 4f
742 / Perform normal forward rounds
743 1: ff_rnd(%r9d, %r10d, %r11d, %r12d, 13)
744 ff_rnd(%r9d, %r10d, %r11d, %r12d, 12)
745 2: ff_rnd(%r9d, %r10d, %r11d, %r12d, 11)
746 ff_rnd(%r9d, %r10d, %r11d, %r12d, 10)
747 3: ff_rnd(%r9d, %r10d, %r11d, %r12d, 9)
748 ff_rnd(%r9d, %r10d, %r11d, %r12d, 8)
749 ff_rnd(%r9d, %r10d, %r11d, %r12d, 7)
750 ff_rnd(%r9d, %r10d, %r11d, %r12d, 6)
751 ff_rnd(%r9d, %r10d, %r11d, %r12d, 5)
752 ff_rnd(%r9d, %r10d, %r11d, %r12d, 4)
753 ff_rnd(%r9d, %r10d, %r11d, %r12d, 3)
754 ff_rnd(%r9d, %r10d, %r11d, %r12d, 2)
755 ff_rnd(%r9d, %r10d, %r11d, %r12d, 1)
756 fl_rnd(%r9d, %r10d, %r11d, %r12d, 0)
758 / Copy results
759 mov (%rsp), %rbx
760 mov %r9d, (%rbx)
761 mov %r10d, 4(%rbx)
762 mov %r11d, 8(%rbx)
763 mov %r12d, 12(%rbx)
764 xor %rax, %rax
765 4: / Restore registers
766 mov 1*8(%rsp), %rbx
767 mov 2*8(%rsp), %rbp
768 mov 3*8(%rsp), %r12
769 add $[4*8], %rsp
772 SET_SIZE(aes_encrypt_amd64)
775 * OpenSolaris OS:
776 * void aes_decrypt_amd64(const aes_ks_t *ks, int Nr,
777 * const uint32_t pt[4], uint32_t ct[4])/
779 * Original interface:
780 * int aes_decrypt(const unsigned char *in,
781 * unsigned char *out, const aes_encrypt_ctx cx[1])/
783 .align 64
784 dec_tab:
785 dec_vals(v8)
786 #ifdef LAST_ROUND_TABLES
787 / Last Round Tables:
788 dec_vals(w8)
789 #endif
792 ENTRY_NP(aes_decrypt_amd64)
793 #ifdef GLADMAN_INTERFACE
794 / Original interface
795 sub $[4*8], %rsp / gnu/linux/opensolaris binary interface
796 mov %rsi, (%rsp) / output pointer (P2)
797 mov %rdx, %r8 / context (P3)
799 mov %rbx, 1*8(%rsp) / P1: input pointer in rdi
800 mov %rbp, 2*8(%rsp) / P2: output pointer in (rsp)
801 mov %r12, 3*8(%rsp) / P3: context in r8
802 movzx 4*KS_LENGTH(kptr), %esi / Get byte key length * 16
804 #else
805 / OpenSolaris OS interface
806 sub $[4*8], %rsp / Make room on stack to save registers
807 mov %rcx, (%rsp) / Save output pointer (P4) on stack
808 mov %rdi, %r8 / context (P1)
809 mov %rdx, %rdi / P3: save input pointer
810 shl $4, %esi / P2: esi byte key length * 16
812 mov %rbx, 1*8(%rsp) / Save registers
813 mov %rbp, 2*8(%rsp)
814 mov %r12, 3*8(%rsp)
815 / P1: context in r8
816 / P2: byte key length * 16 in esi
817 / P3: input pointer in rdi
818 / P4: output pointer in (rsp)
819 #endif /* GLADMAN_INTERFACE */
821 lea dec_tab(%rip), tptr
822 sub $rofs, kptr
824 / Load input block into registers
825 mov (%rdi), %eax
826 mov 1*4(%rdi), %ebx
827 mov 2*4(%rdi), %ecx
828 mov 3*4(%rdi), %edx
830 #ifdef AES_REV_DKS
831 mov kptr, %rdi
832 lea (kptr,%rsi), kptr
833 #else
834 lea (kptr,%rsi), %rdi
835 #endif
837 xor rofs(%rdi), %eax
838 xor rofs+4(%rdi), %ebx
839 xor rofs+8(%rdi), %ecx
840 xor rofs+12(%rdi), %edx
842 / Jump based on byte key length * 16:
843 cmp $[10*16], %esi
844 je 3f
845 cmp $[12*16], %esi
846 je 2f
847 cmp $[14*16], %esi
848 je 1f
849 mov $-1, %rax / error
850 jmp 4f
852 / Perform normal inverse rounds
853 1: ii_rnd(%r9d, %r10d, %r11d, %r12d, 13)
854 ii_rnd(%r9d, %r10d, %r11d, %r12d, 12)
855 2: ii_rnd(%r9d, %r10d, %r11d, %r12d, 11)
856 ii_rnd(%r9d, %r10d, %r11d, %r12d, 10)
857 3: ii_rnd(%r9d, %r10d, %r11d, %r12d, 9)
858 ii_rnd(%r9d, %r10d, %r11d, %r12d, 8)
859 ii_rnd(%r9d, %r10d, %r11d, %r12d, 7)
860 ii_rnd(%r9d, %r10d, %r11d, %r12d, 6)
861 ii_rnd(%r9d, %r10d, %r11d, %r12d, 5)
862 ii_rnd(%r9d, %r10d, %r11d, %r12d, 4)
863 ii_rnd(%r9d, %r10d, %r11d, %r12d, 3)
864 ii_rnd(%r9d, %r10d, %r11d, %r12d, 2)
865 ii_rnd(%r9d, %r10d, %r11d, %r12d, 1)
866 il_rnd(%r9d, %r10d, %r11d, %r12d, 0)
868 / Copy results
869 mov (%rsp), %rbx
870 mov %r9d, (%rbx)
871 mov %r10d, 4(%rbx)
872 mov %r11d, 8(%rbx)
873 mov %r12d, 12(%rbx)
874 xor %rax, %rax
875 4: / Restore registers
876 mov 1*8(%rsp), %rbx
877 mov 2*8(%rsp), %rbp
878 mov 3*8(%rsp), %r12
879 add $[4*8], %rsp
882 SET_SIZE(aes_decrypt_amd64)