/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */
#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
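
/*
 * Usage sketch (hypothetical, for illustration only): a cipher driver
 * describes its batched helpers in a struct common_glue_ctx from
 * <asm/crypto/glue_helper.h>, ordered from the largest to the smallest
 * num_blocks; the loops below walk this table and fall back to the
 * one-block entry for the tail.  The mycipher_* names and
 * MYCIPHER_PARALLEL_BLOCKS below are assumed, not part of this file.
 *
 *	static const struct common_glue_ctx mycipher_enc_gctx = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = MYCIPHER_PARALLEL_BLOCKS,
 *
 *		.funcs = { {
 *			.num_blocks = MYCIPHER_PARALLEL_BLOCKS,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_enc_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_encrypt) }
 *		} }
 *	};
 */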

static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
				   struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes, i, func_bytes;
	bool fpu_enabled = false;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);

		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			/* Process multi-block batch */
			if (nbytes >= func_bytes) {
				do {
					gctx->funcs[i].fn_u.ecb(ctx, wdst,
								wsrc);

					wsrc += func_bytes;
					wdst += func_bytes;
					nbytes -= func_bytes;
				} while (nbytes >= func_bytes);

				if (nbytes < bsize)
					goto done;
			}
		}

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}

int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
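
/*
 * Usage sketch (hypothetical, not part of this file): a driver's blkcipher
 * entry point simply forwards to glue_ecb_crypt_128bit() with its own
 * common_glue_ctx table; "mycipher_enc_gctx" below is an assumed name.
 *
 *	static int ecb_encrypt(struct blkcipher_desc *desc,
 *			       struct scatterlist *dst,
 *			       struct scatterlist *src, unsigned int nbytes)
 *	{
 *		return glue_ecb_crypt_128bit(&mycipher_enc_gctx, desc, dst,
 *					     src, nbytes);
 *	}
 */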

static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
					      struct blkcipher_desc *desc,
					      struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		fn(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u128 *)walk->iv = *iv;
	return nbytes;
}

int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);

static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc,
			  struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 last_iv;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		/* Process multi-block batch */
		if (nbytes >= func_bytes) {
			do {
				nbytes -= func_bytes - bsize;
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= bsize;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, src - 1);
				src -= 1;
				dst -= 1;
			} while (nbytes >= func_bytes);
		}
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);

static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
					struct blkcipher_desc *desc,
					struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *src = (u8 *)walk->src.virt.addr;
	u8 *dst = (u8 *)walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	le128 ctrblk;
	u128 tmp;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	memcpy(&tmp, src, nbytes);
	fn_ctr(ctx, &tmp, &tmp, &ctrblk);
	memcpy(dst, &tmp, nbytes);

	le128_to_be128((be128 *)walk->iv, &ctrblk);
}

static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	le128 ctrblk;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	le128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while ((nbytes = walk.nbytes) >= bsize) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		glue_ctr_crypt_final_128bit(
			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
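
/*
 * Usage sketch (hypothetical): the one-block entry in a driver's CTR
 * common_glue_ctx typically encrypts the little-endian counter, bumps it
 * with le128_inc() and XORs the keystream into the data; "mycipher_encrypt"
 * is an assumed single-block cipher function.
 *
 *	static void mycipher_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
 *				       le128 *iv)
 *	{
 *		be128 ctrblk;
 *
 *		le128_to_be128(&ctrblk, iv);
 *		le128_inc(iv);
 *
 *		mycipher_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
 *		u128_xor(dst, src, (u128 *)&ctrblk);
 *	}
 */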

static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
					    void *ctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							(le128 *)walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

/* for implementations implementing faster XTS IV generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes,
			  void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
			  void *tweak_ctx, void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     desc, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);

int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
					    &walk, fpu_enabled,
					    nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);

void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
			       common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor(dst, src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, (u8 *)dst, (u8 *)dst);

	/* P <- T xor PP */
	u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
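
/*
 * Usage sketch (hypothetical): the one-block XTS entries in a driver's
 * common_glue_ctx simply wrap the driver's single-block cipher function
 * with glue_xts_crypt_128bit_one(); "mycipher_encrypt" is an assumed name.
 *
 *	static void mycipher_xts_enc(void *ctx, u128 *dst, const u128 *src,
 *				     le128 *iv)
 *	{
 *		glue_xts_crypt_128bit_one(ctx, dst, src, iv,
 *					  GLUE_FUNC_CAST(mycipher_encrypt));
 *	}
 */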

MODULE_LICENSE("GPL");