/* The serpent block cipher.
 *
 * For more details on this algorithm, see the Serpent website at
 * http://www.cl.cam.ac.uk/~rja14/serpent.html
 */
/* nettle, low-level cryptographic library
 *
 * Copyright (C) 2011 Niels Möller
 * Copyright (C) 2010, 2011 Simon Josefsson
 * Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
 *
 * The nettle library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * The nettle library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with the nettle library; see the file COPYING.LIB.  If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02111-1301, USA.
 */
/* This file is derived from cipher/serpent.c in Libgcrypt v1.4.6.
   The adaptation to Nettle was made by Simon Josefsson on 2010-12-07
   with final touches on 2011-05-30.  Changes include replacing
   libgcrypt with nettle in the license template, renaming
   serpent_context to serpent_ctx, renaming u32 to uint32_t, removing
   libgcrypt stubs and selftests, modifying entry function prototypes,
   using FOR_BLOCKS to iterate through data in encrypt/decrypt, using
   LE_READ_UINT32 and LE_WRITE_UINT32 to access data in
   encrypt/decrypt, and running indent on the code. */
#include <assert.h>

#include "serpent.h"

#include "macros.h"
#include "serpent-internal.h"
/* These are the S-Boxes of Serpent.  They are copied from Serpent's
   reference implementation (the optimized one, contained in
   `floppy2') and are therefore:

     Copyright (C) 1998 Ross Anderson, Eli Biham, Lars Knudsen.

   To quote the Serpent homepage
   (http://www.cl.cam.ac.uk/~rja14/serpent.html):

   "Serpent is now completely in the public domain, and we impose no
   restrictions on its use.  This was announced on the 21st August at
   the First AES Candidate Conference.  The optimised implementations
   in the submission package are now under the GNU PUBLIC LICENSE
   (GPL), although some comments in the code still say otherwise.  You
   are welcome to use Serpent for any application."  */
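/* How to read the "Sn inverse" tables below: entry i of a table is
   the nibble that S-box n maps to i, i.e. the inverse permutation
   listed value by value for outputs 0..15.  As an illustrative
   cross-check only (a hypothetical helper, not part of this file),
   such a table could be applied nibble-wise to a word like this:

     static uint32_t
     apply_nibble_table (const uint8_t table[16], uint32_t x)
     {
       uint32_t r = 0;
       unsigned i;
       for (i = 0; i < 8; i++)
         r |= (uint32_t) table[(x >> (4*i)) & 0xf] << (4*i);
       return r;
     }

   The SBOXn_INVERSE macros below compute the same permutations in
   bitsliced form: each of x0..x3 holds one bit of 32 independent
   4-bit inputs, so a single macro invocation performs 32 S-box
   lookups using only word-wide boolean operations.  */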
/* S0 inverse: 13 3 11 0 10 6 5 12 1 14 4 7 15 9 8 2 */
/* Original single-assignment form:
#define SBOX0_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \

/* S1 inverse: 5 8 2 14 15 6 12 3 11 4 7 9 1 13 10 0 */
/* Original single-assignment form:
#define SBOX1_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \

/* S2 inverse: 12 9 15 4 11 14 1 2 0 3 6 13 5 8 10 7 */
/* Original single-assignment form:
#define SBOX2_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \

/* S3 inverse: 0 9 10 7 11 14 6 13 3 5 12 2 4 8 15 1 */
/* Original single-assignment form:
#define SBOX3_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \

/* S4 inverse: 5 0 8 3 10 9 7 14 2 12 11 6 4 15 13 1 */
/* Original single-assignment form:
#define SBOX4_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \

/* S5 inverse: 8 15 2 9 4 1 13 14 11 6 5 3 7 12 10 0 */
/* Original single-assignment form:
#define SBOX5_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \

/* S6 inverse: 15 10 1 13 5 3 6 0 4 9 14 7 2 12 8 11 */
/* Original single-assignment form:
#define SBOX6_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \

/* S7 inverse: 3 0 6 13 9 14 15 8 5 12 11 7 10 1 4 2 */
/* Original single-assignment form:
#define SBOX7_INVERSE(x0, x1, x2, x3, y0, y1, y2, y3) \
/* In-place inverse linear transformation.  */
#define LINEAR_TRANSFORMATION_INVERSE(x0,x1,x2,x3)  \
  do {                                              \
    x2 = ROTL32 (10, x2);                           \
    x0 = ROTL32 (27, x0);                           \
    x2 = x2 ^ x3 ^ (x1 << 7);                       \
    x0 = x0 ^ x1 ^ x3;                              \
    x3 = ROTL32 (25, x3);                           \
    x1 = ROTL32 (31, x1);                           \
    x3 = x3 ^ x2 ^ (x0 << 3);                       \
    x1 = x1 ^ x0 ^ x2;                              \
    x2 = ROTL32 (29, x2);                           \
    x0 = ROTL32 (19, x0);                           \
  } while (0)
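/* For reference, a sketch of the forward linear transformation that
   the macro above inverts, following the Serpent specification.  The
   name is made up for illustration and nothing in this file uses it;
   Nettle's encryption side lives in serpent-encrypt.c.  */
#define LINEAR_TRANSFORMATION_FORWARD_SKETCH(x0,x1,x2,x3)  \
  do {                                                     \
    x0 = ROTL32 (13, x0);                                  \
    x2 = ROTL32 (3, x2);                                   \
    x1 = x1 ^ x0 ^ x2;                                     \
    x3 = x3 ^ x2 ^ (x0 << 3);                              \
    x1 = ROTL32 (1, x1);                                   \
    x3 = ROTL32 (7, x3);                                   \
    x0 = x0 ^ x1 ^ x3;                                     \
    x2 = x2 ^ x3 ^ (x1 << 7);                              \
    x0 = ROTL32 (5, x0);                                   \
    x2 = ROTL32 (22, x2);                                  \
  } while (0)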
/* Round inputs are x0,x1,x2,x3 (destroyed), and round outputs are
   y0,y1,y2,y3. */
#define ROUND_INVERSE(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
  do {                                                         \
    LINEAR_TRANSFORMATION_INVERSE (x0,x1,x2,x3);               \
    SBOX##which##_INVERSE(x0,x1,x2,x3, y0,y1,y2,y3);           \
    KEYXOR(y0,y1,y2,y3, subkey);                               \
  } while (0)
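/* For comparison: an encryption round applies the subkey XOR, then the
   S-box, then the linear transformation; ROUND_INVERSE above undoes
   those steps in the opposite order.  The final encryption round
   replaces the linear transformation with an extra subkey XOR, which
   is why serpent_decrypt below starts with a special round using
   keys[32] and keys[31] before entering the regular round loop.  */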
#if HAVE_NATIVE_64_BIT
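/* The macros below mirror the 32-bit versions above, but operate on
   uint64_t values that pack the corresponding words of two consecutive
   blocks, one block per 32-bit half.  DROTL32 and DRSHIFT32, presumably
   provided by serpent-internal.h, are taken here to rotate and shift
   each 32-bit half independently, so a single 64-bit operation
   processes two blocks in parallel.  */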
/* In-place inverse linear transformation.  */
#define LINEAR_TRANSFORMATION64_INVERSE(x0,x1,x2,x3)  \
  do {                                                \
    x2 = DROTL32 (10, x2);                            \
    x0 = DROTL32 (27, x0);                            \
    x2 = x2 ^ x3 ^ DRSHIFT32(7, x1);                  \
    x0 = x0 ^ x1 ^ x3;                                \
    x3 = DROTL32 (25, x3);                            \
    x1 = DROTL32 (31, x1);                            \
    x3 = x3 ^ x2 ^ DRSHIFT32(3, x0);                  \
    x1 = x1 ^ x0 ^ x2;                                \
    x2 = DROTL32 (29, x2);                            \
    x0 = DROTL32 (19, x0);                            \
  } while (0)

#define ROUND64_INVERSE(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
  do {                                                           \
    LINEAR_TRANSFORMATION64_INVERSE (x0,x1,x2,x3);               \
    SBOX##which##_INVERSE(x0,x1,x2,x3, y0,y1,y2,y3);             \
    KEYXOR64(y0,y1,y2,y3, subkey);                               \
  } while (0)

#endif /* HAVE_NATIVE_64_BIT */
void
serpent_decrypt (const struct serpent_ctx *ctx,
                 unsigned length, uint8_t * dst, const uint8_t * src)
{
  assert( !(length % SERPENT_BLOCK_SIZE));

#if HAVE_NATIVE_64_BIT
  if (length & SERPENT_BLOCK_SIZE)
#else
  while (length >= SERPENT_BLOCK_SIZE)
#endif
    {
      uint32_t x0,x1,x2,x3, y0,y1,y2,y3;
      unsigned k;

      x0 = LE_READ_UINT32 (src);
      x1 = LE_READ_UINT32 (src + 4);
      x2 = LE_READ_UINT32 (src + 8);
      x3 = LE_READ_UINT32 (src + 12);

      /* Inverse of special round */
      KEYXOR (x0,x1,x2,x3, ctx->keys[32]);
      SBOX7_INVERSE (x0,x1,x2,x3, y0,y1,y2,y3);
      KEYXOR (y0,y1,y2,y3, ctx->keys[31]);

      k = 24;
      goto start32;
      while (k > 0)
        {
          k -= 8;
          ROUND_INVERSE (7, ctx->keys[k+7], x0,x1,x2,x3, y0,y1,y2,y3);
        start32:
          ROUND_INVERSE (6, ctx->keys[k+6], y0,y1,y2,y3, x0,x1,x2,x3);
          ROUND_INVERSE (5, ctx->keys[k+5], x0,x1,x2,x3, y0,y1,y2,y3);
          ROUND_INVERSE (4, ctx->keys[k+4], y0,y1,y2,y3, x0,x1,x2,x3);
          ROUND_INVERSE (3, ctx->keys[k+3], x0,x1,x2,x3, y0,y1,y2,y3);
          ROUND_INVERSE (2, ctx->keys[k+2], y0,y1,y2,y3, x0,x1,x2,x3);
          ROUND_INVERSE (1, ctx->keys[k+1], x0,x1,x2,x3, y0,y1,y2,y3);
          ROUND_INVERSE (0, ctx->keys[k], y0,y1,y2,y3, x0,x1,x2,x3);
        }

      LE_WRITE_UINT32 (dst, x0);
      LE_WRITE_UINT32 (dst + 4, x1);
      LE_WRITE_UINT32 (dst + 8, x2);
      LE_WRITE_UINT32 (dst + 12, x3);

      src += SERPENT_BLOCK_SIZE;
      dst += SERPENT_BLOCK_SIZE;
      length -= SERPENT_BLOCK_SIZE;
    }

#if HAVE_NATIVE_64_BIT
  FOR_BLOCKS(length, dst, src, 2*SERPENT_BLOCK_SIZE)
    {
      uint64_t x0,x1,x2,x3, y0,y1,y2,y3;
      unsigned k;

      x0 = LE_READ_UINT32 (src);
      x1 = LE_READ_UINT32 (src + 4);
      x2 = LE_READ_UINT32 (src + 8);
      x3 = LE_READ_UINT32 (src + 12);

      x0 <<= 32; x0 |= LE_READ_UINT32 (src + 16);
      x1 <<= 32; x1 |= LE_READ_UINT32 (src + 20);
      x2 <<= 32; x2 |= LE_READ_UINT32 (src + 24);
      x3 <<= 32; x3 |= LE_READ_UINT32 (src + 28);

      /* Inverse of special round */
      KEYXOR64 (x0,x1,x2,x3, ctx->keys[32]);
      SBOX7_INVERSE (x0,x1,x2,x3, y0,y1,y2,y3);
      KEYXOR64 (y0,y1,y2,y3, ctx->keys[31]);

      k = 24;
      goto start64;
      while (k > 0)
        {
          k -= 8;
          ROUND64_INVERSE (7, ctx->keys[k+7], x0,x1,x2,x3, y0,y1,y2,y3);
        start64:
          ROUND64_INVERSE (6, ctx->keys[k+6], y0,y1,y2,y3, x0,x1,x2,x3);
          ROUND64_INVERSE (5, ctx->keys[k+5], x0,x1,x2,x3, y0,y1,y2,y3);
          ROUND64_INVERSE (4, ctx->keys[k+4], y0,y1,y2,y3, x0,x1,x2,x3);
          ROUND64_INVERSE (3, ctx->keys[k+3], x0,x1,x2,x3, y0,y1,y2,y3);
          ROUND64_INVERSE (2, ctx->keys[k+2], y0,y1,y2,y3, x0,x1,x2,x3);
          ROUND64_INVERSE (1, ctx->keys[k+1], x0,x1,x2,x3, y0,y1,y2,y3);
          ROUND64_INVERSE (0, ctx->keys[k], y0,y1,y2,y3, x0,x1,x2,x3);
        }

      LE_WRITE_UINT32 (dst + 16, x0);
      LE_WRITE_UINT32 (dst + 20, x1);
      LE_WRITE_UINT32 (dst + 24, x2);
      LE_WRITE_UINT32 (dst + 28, x3);
      x0 >>= 32; LE_WRITE_UINT32 (dst, x0);
      x1 >>= 32; LE_WRITE_UINT32 (dst + 4, x1);
      x2 >>= 32; LE_WRITE_UINT32 (dst + 8, x2);
      x3 >>= 32; LE_WRITE_UINT32 (dst + 12, x3);
    }
#endif /* HAVE_NATIVE_64_BIT */
}
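/* A minimal usage sketch (illustrative only, not part of this file;
   see serpent.h and the Nettle documentation for the exact
   prototypes):

     #include <nettle/serpent.h>

     struct serpent_ctx ctx;
     uint8_t key[SERPENT_KEY_SIZE];
     uint8_t cipher[SERPENT_BLOCK_SIZE], clear[SERPENT_BLOCK_SIZE];

     serpent_set_key (&ctx, sizeof (key), key);
     serpent_decrypt (&ctx, sizeof (cipher), clear, cipher);

   The length passed to serpent_decrypt must be a multiple of
   SERPENT_BLOCK_SIZE (16 bytes), as enforced by the assert above.  */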