3 * The serpent block cipher.
5 * For more details on this algorithm, see the Serpent website at
6 * http://www.cl.cam.ac.uk/~rja14/serpent.html
/* nettle, low-level cryptographic library
11 * Copyright (C) 2011 Niels Möller
12 * Copyright (C) 2010, 2011 Simon Josefsson
13 * Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
15 * The nettle library is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU Lesser General Public License as published by
17 * the Free Software Foundation; either version 2.1 of the License, or (at your
18 * option) any later version.
20 * The nettle library is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
22 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
23 * License for more details.
25 * You should have received a copy of the GNU Lesser General Public License
26 * along with the nettle library; see the file COPYING.LIB. If not, write to
27 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
31 /* This file is derived from cipher/serpent.c in Libgcrypt v1.4.6.
   The adaptation to Nettle was made by Simon Josefsson on 2010-12-07
33 with final touches on 2011-05-30. Changes include replacing
34 libgcrypt with nettle in the license template, renaming
35 serpent_context to serpent_ctx, renaming u32 to uint32_t, removing
36 libgcrypt stubs and selftests, modifying entry function prototypes,
37 using FOR_BLOCKS to iterate through data in encrypt/decrypt, using
38 LE_READ_UINT32 and LE_WRITE_UINT32 to access data in
39 encrypt/decrypt, and running indent on the code. */
51 #include "serpent-internal.h"
53 /* These are the S-Boxes of Serpent. They are copied from Serpents
54 reference implementation (the optimized one, contained in
55 `floppy2') and are therefore:
57 Copyright (C) 1998 Ross Anderson, Eli Biham, Lars Knudsen.
59 To quote the Serpent homepage
60 (http://www.cl.cam.ac.uk/~rja14/serpent.html):
62 "Serpent is now completely in the public domain, and we impose no
63 restrictions on its use. This was announced on the 21st August at
64 the First AES Candidate Conference. The optimised implementations
65 in the submission package are now under the GNU PUBLIC LICENSE
66 (GPL), although some comments in the code still say otherwise. You
67 are welcome to use Serpent for any application." */
69 /* S0: 3 8 15 1 10 6 5 11 14 13 4 2 7 0 9 12 */
70 /* Could easily let y0, y1 overlap with x0, x1, and possibly also x2 and y2 */
71 #define SBOX0(x0, x1, x2, x3, y0, y1, y2, y3) \
93 /* FIXME: Arrange for some overlap between inputs and outputs? */
94 /* S1: 15 12 2 7 9 0 5 10 1 11 14 8 6 13 3 4 */
95 /* Original single-assignment form:
116 #define SBOX1(x0, x1, x2, x3, y0, y1, y2, y3) \
138 /* FIXME: Arrange for some overlap between inputs and outputs? */
139 /* S2: 8 6 7 9 3 12 10 15 13 1 14 4 0 11 5 2 */
140 #define SBOX2(x0, x1, x2, x3, y0, y1, y2, y3) \
160 /* S3: 0 15 11 8 12 9 6 3 13 1 2 4 10 7 5 14 */
161 /* Original single-assignment form:
182 #define SBOX3(x0, x1, x2, x3, y0, y1, y2, y3) \
205 /* S4: 1 15 8 3 12 0 11 6 2 5 4 10 9 14 7 13 */
206 /* Original single-assignment form:
227 #define SBOX4(x0, x1, x2, x3, y0, y1, y2, y3) \
250 /* S5: 15 5 2 11 4 10 9 12 0 3 14 8 13 6 7 1 */
251 /* Original single-assignment form:
270 #define SBOX5(x0, x1, x2, x3, y0, y1, y2, y3) \
291 /* S6: 7 2 12 5 8 4 6 11 14 9 1 15 13 3 10 0 */
292 /* Original single-assignment form:
313 #define SBOX6(x0, x1, x2, x3, y0, y1, y2, y3) \
336 /* S7: 1 13 15 0 14 8 2 11 7 4 12 10 9 3 5 6 */
337 /* Original single-assignment form:
358 /* It appears impossible to do this with only 8 registers. We
359 recompute t02, and t04 (if we have spare registers, hopefully the
360 compiler can recognize them as common subexpressions). */
361 #define SBOX7(x0, x1, x2, x3, y0, y1, y2, y3) \
364 y3 = x1 | y0; /* t04 */ \
366 y1 = ~x3; /* t02 */ \
375 y2 = x1 | y0; /* t04 */ \
381 x3 = ~x3; /* t02 */ \
386 /* In-place linear transformation. */
387 #define LINEAR_TRANSFORMATION(x0,x1,x2,x3) \
389 x0 = ROTL32 (13, x0); \
390 x2 = ROTL32 (3, x2); \
392 x3 = x3 ^ x2 ^ (x0 << 3); \
393 x1 = ROTL32 (1, x1); \
394 x3 = ROTL32 (7, x3); \
396 x2 = x2 ^ x3 ^ (x1 << 7); \
397 x0 = ROTL32 (5, x0); \
398 x2 = ROTL32 (22, x2); \
401 /* Round inputs are x0,x1,x2,x3 (destroyed), and round outputs are
403 #define ROUND(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
405 KEYXOR(x0,x1,x2,x3, subkey); \
406 SBOX##which(x0,x1,x2,x3, y0,y1,y2,y3); \
407 LINEAR_TRANSFORMATION(y0,y1,y2,y3); \
410 #if HAVE_NATIVE_64_BIT
412 #define LINEAR_TRANSFORMATION64(x0,x1,x2,x3) \
414 x0 = DROTL32 (13, x0); \
415 x2 = DROTL32 (3, x2); \
417 x3 = x3 ^ x2 ^ DRSHIFT32(3, x0); \
418 x1 = DROTL32 (1, x1); \
419 x3 = DROTL32 (7, x3); \
421 x2 = x2 ^ x3 ^ DRSHIFT32(7, x1); \
422 x0 = DROTL32 (5, x0); \
423 x2 = DROTL32 (22, x2); \
426 #define ROUND64(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
428 KEYXOR64(x0,x1,x2,x3, subkey); \
429 SBOX##which(x0,x1,x2,x3, y0,y1,y2,y3); \
430 LINEAR_TRANSFORMATION64(y0,y1,y2,y3); \
433 #endif /* HAVE_NATIVE_64_BIT */
436 serpent_encrypt (const struct serpent_ctx
*ctx
,
437 unsigned length
, uint8_t * dst
, const uint8_t * src
)
439 assert( !(length
% SERPENT_BLOCK_SIZE
));
441 #if HAVE_NATIVE_64_BIT
442 if (length
& SERPENT_BLOCK_SIZE
)
444 while (length
>= SERPENT_BLOCK_SIZE
)
447 uint32_t x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
;
450 x0
= LE_READ_UINT32 (src
);
451 x1
= LE_READ_UINT32 (src
+ 4);
452 x2
= LE_READ_UINT32 (src
+ 8);
453 x3
= LE_READ_UINT32 (src
+ 12);
455 for (k
= 0; ; k
+= 8)
457 ROUND (0, ctx
->keys
[k
+0], x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
);
458 ROUND (1, ctx
->keys
[k
+1], y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
459 ROUND (2, ctx
->keys
[k
+2], x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
);
460 ROUND (3, ctx
->keys
[k
+3], y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
461 ROUND (4, ctx
->keys
[k
+4], x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
);
462 ROUND (5, ctx
->keys
[k
+5], y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
463 ROUND (6, ctx
->keys
[k
+6], x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
);
466 ROUND (7, ctx
->keys
[k
+7], y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
469 /* Special final round, using two subkeys. */
470 KEYXOR (y0
,y1
,y2
,y3
, ctx
->keys
[31]);
471 SBOX7 (y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
472 KEYXOR (x0
,x1
,x2
,x3
, ctx
->keys
[32]);
474 LE_WRITE_UINT32 (dst
, x0
);
475 LE_WRITE_UINT32 (dst
+ 4, x1
);
476 LE_WRITE_UINT32 (dst
+ 8, x2
);
477 LE_WRITE_UINT32 (dst
+ 12, x3
);
479 src
+= SERPENT_BLOCK_SIZE
;
480 dst
+= SERPENT_BLOCK_SIZE
;
481 length
-= SERPENT_BLOCK_SIZE
;
483 #if HAVE_NATIVE_64_BIT
484 FOR_BLOCKS(length
, dst
, src
, 2*SERPENT_BLOCK_SIZE
)
486 uint64_t x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
;
489 x0
= LE_READ_UINT32 (src
);
490 x1
= LE_READ_UINT32 (src
+ 4);
491 x2
= LE_READ_UINT32 (src
+ 8);
492 x3
= LE_READ_UINT32 (src
+ 12);
494 x0
<<= 32; x0
|= LE_READ_UINT32 (src
+ 16);
495 x1
<<= 32; x1
|= LE_READ_UINT32 (src
+ 20);
496 x2
<<= 32; x2
|= LE_READ_UINT32 (src
+ 24);
497 x3
<<= 32; x3
|= LE_READ_UINT32 (src
+ 28);
499 for (k
= 0; ; k
+= 8)
501 ROUND64 (0, ctx
->keys
[k
+0], x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
);
502 ROUND64 (1, ctx
->keys
[k
+1], y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
503 ROUND64 (2, ctx
->keys
[k
+2], x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
);
504 ROUND64 (3, ctx
->keys
[k
+3], y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
505 ROUND64 (4, ctx
->keys
[k
+4], x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
);
506 ROUND64 (5, ctx
->keys
[k
+5], y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
507 ROUND64 (6, ctx
->keys
[k
+6], x0
,x1
,x2
,x3
, y0
,y1
,y2
,y3
);
510 ROUND64 (7, ctx
->keys
[k
+7], y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
513 /* Special final round, using two subkeys. */
514 KEYXOR64 (y0
,y1
,y2
,y3
, ctx
->keys
[31]);
515 SBOX7 (y0
,y1
,y2
,y3
, x0
,x1
,x2
,x3
);
516 KEYXOR64 (x0
,x1
,x2
,x3
, ctx
->keys
[32]);
518 LE_WRITE_UINT32 (dst
+ 16, x0
);
519 LE_WRITE_UINT32 (dst
+ 20, x1
);
520 LE_WRITE_UINT32 (dst
+ 24, x2
);
521 LE_WRITE_UINT32 (dst
+ 28, x3
);
522 x0
>>= 32; LE_WRITE_UINT32 (dst
, x0
);
523 x1
>>= 32; LE_WRITE_UINT32 (dst
+ 4, x1
);
524 x2
>>= 32; LE_WRITE_UINT32 (dst
+ 8, x2
);
525 x3
>>= 32; LE_WRITE_UINT32 (dst
+ 12, x3
);
527 #endif /* HAVE_NATIVE_64_BIT */